SunSec committed on
Commit
4ac1fc5
·
verified ·
1 Parent(s): ad44746

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +14 -0
  2. deep_search/DeepResearcher/docker/Dockerfile.megatron +9 -0
  3. deep_search/DeepResearcher/docker/Dockerfile.ngc.vllm +47 -0
  4. deep_search/DeepResearcher/docker/Dockerfile.vemlp.vllm.te +41 -0
  5. deep_search/DeepResearcher/docs/Makefile +20 -0
  6. deep_search/DeepResearcher/docs/README.md +19 -0
  7. deep_search/DeepResearcher/docs/README_vllm0.7.md +71 -0
  8. deep_search/DeepResearcher/docs/advance/dpo_extension.rst +271 -0
  9. deep_search/DeepResearcher/docs/advance/fsdp_extension.rst +95 -0
  10. deep_search/DeepResearcher/docs/advance/megatron_extension.rst +26 -0
  11. deep_search/DeepResearcher/docs/advance/placement.rst +11 -0
  12. deep_search/DeepResearcher/docs/conf.py +83 -0
  13. deep_search/DeepResearcher/docs/data.rst +59 -0
  14. deep_search/DeepResearcher/docs/examples/config.rst +361 -0
  15. deep_search/DeepResearcher/docs/examples/gsm8k_example.rst +165 -0
  16. deep_search/DeepResearcher/docs/examples/ppo_code_architecture.rst +207 -0
  17. deep_search/DeepResearcher/docs/experiment/ppo.rst +45 -0
  18. deep_search/DeepResearcher/docs/faq/faq.rst +62 -0
  19. deep_search/DeepResearcher/docs/hybrid_flow.rst +269 -0
  20. deep_search/DeepResearcher/docs/index.rst +119 -0
  21. deep_search/DeepResearcher/docs/perf/perf_tuning.rst +164 -0
  22. deep_search/DeepResearcher/docs/preparation/prepare_data.rst +126 -0
  23. deep_search/DeepResearcher/docs/preparation/reward_function.rst +46 -0
  24. deep_search/DeepResearcher/docs/requirements-docs.txt +12 -0
  25. deep_search/DeepResearcher/docs/start/install.rst +114 -0
  26. deep_search/DeepResearcher/docs/start/quickstart.rst +141 -0
  27. deep_search/DeepResearcher/docs/workers/fsdp_workers.rst +142 -0
  28. deep_search/DeepResearcher/docs/workers/megatron_workers.rst +200 -0
  29. deep_search/DeepResearcher/docs/workers/ray_trainer.rst +243 -0
  30. deep_search/DeepResearcher/evaluate/cacluate_metrics.py +283 -0
  31. deep_search/DeepResearcher/verl/single_controller/__init__.py +26 -0
  32. deep_search/DeepResearcher/verl/single_controller/base/__init__.py +18 -0
  33. deep_search/DeepResearcher/verl/single_controller/base/decorator.py +410 -0
  34. deep_search/DeepResearcher/verl/single_controller/base/megatron/__init__.py +13 -0
  35. deep_search/DeepResearcher/verl/single_controller/base/megatron/worker.py +37 -0
  36. deep_search/DeepResearcher/verl/single_controller/base/megatron/worker_group.py +51 -0
  37. deep_search/DeepResearcher/verl/single_controller/base/register_center/__init__.py +13 -0
  38. deep_search/DeepResearcher/verl/single_controller/base/register_center/ray.py +29 -0
  39. deep_search/DeepResearcher/verl/single_controller/base/worker.py +185 -0
  40. deep_search/DeepResearcher/verl/single_controller/base/worker_group.py +198 -0
  41. deep_search/DeepResearcher/verl/single_controller/ray/__init__.py +15 -0
  42. deep_search/DeepResearcher/verl/single_controller/ray/base.py +459 -0
  43. deep_search/DeepResearcher/verl/single_controller/ray/megatron.py +62 -0
  44. deep_search/DeepResearcher/verl/utils/checkpoint/__init__.py +13 -0
  45. deep_search/DeepResearcher/verl/utils/checkpoint/checkpoint_manager.py +138 -0
  46. deep_search/DeepResearcher/verl/utils/checkpoint/fsdp_checkpoint_manager.py +159 -0
  47. deep_search/DeepResearcher/verl/utils/debug/__init__.py +15 -0
  48. deep_search/DeepResearcher/verl/utils/debug/performance.py +30 -0
  49. deep_search/DeepResearcher/verl/utils/debug/trajectory_tracker.py +108 -0
  50. deep_search/DeepResearcher/verl/utils/reward_score/prime_code/__init__.py +73 -0
.gitattributes CHANGED
@@ -2239,3 +2239,17 @@ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_
2239
  deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_4.json filter=lfs diff=lfs merge=lfs -text
2240
  deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_7.json filter=lfs diff=lfs merge=lfs -text
2241
  deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_12.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2239
  deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_4.json filter=lfs diff=lfs merge=lfs -text
2240
  deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_1/turn_7.json filter=lfs diff=lfs merge=lfs -text
2241
  deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_12.json filter=lfs diff=lfs merge=lfs -text
2242
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_8.json filter=lfs diff=lfs merge=lfs -text
2243
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_6.json filter=lfs diff=lfs merge=lfs -text
2244
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_5.json filter=lfs diff=lfs merge=lfs -text
2245
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_9.json filter=lfs diff=lfs merge=lfs -text
2246
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_7.json filter=lfs diff=lfs merge=lfs -text
2247
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_10.json filter=lfs diff=lfs merge=lfs -text
2248
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_4.json filter=lfs diff=lfs merge=lfs -text
2249
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_11.json filter=lfs diff=lfs merge=lfs -text
2250
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_1.json filter=lfs diff=lfs merge=lfs -text
2251
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_3.json filter=lfs diff=lfs merge=lfs -text
2252
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/turn_2.json filter=lfs diff=lfs merge=lfs -text
2253
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_9/gen.4.22,11:6.info_extract.json filter=lfs diff=lfs merge=lfs -text
2254
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_2/turn_12.json filter=lfs diff=lfs merge=lfs -text
2255
+ deep_search/search_o1/output/output_sum_all_webpage_gen_data/outputs_17w_select_3k_for_dpo_split_4/rollout_2/gen.4.9,15:24.json filter=lfs diff=lfs merge=lfs -text
deep_search/DeepResearcher/docker/Dockerfile.megatron ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ FROM verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
2
+
3
+ RUN pip install git+https://github.com/NVIDIA/TransformerEngine.git@stable
4
+
5
+ RUN cd /opt/nvidia && git clone --single-branch --branch core_r0.11.0 https://github.com/NVIDIA/Megatron-LM.git Megatron-LM
6
+
7
+ # only config pip index with https://pypi.tuna.tsinghua.edu.cn/simple if needed
8
+ # unset for now
9
+ RUN cd /opt/nvidia/Megatron-LM && pip3 install --no-deps -e .
deep_search/DeepResearcher/docker/Dockerfile.ngc.vllm ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # docker buildx build --platform linux/x86_64 -t "verlai/verl:ngc-th2.4.0-cu124-vllm0.6.3-ray2.4-te1.7-v0.0.6" -f docker/Dockerfile.ngc.vllm . --builder cloud-verlai-verl-builder --progress=plain --push
2
+ FROM nvcr.io/nvidia/pytorch:24.05-py3
3
+
4
+ # uninstall nv-pytorch fork
5
+ RUN pip3 uninstall pytorch-quantization \
6
+ pytorch-triton \
7
+ torch \
8
+ torch-tensorrt \
9
+ torchvision \
10
+ xgboost transformer_engine flash_attn \
11
+ apex megatron-core -y
12
+
13
+ RUN pip3 install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu124
14
+
15
+ # =============== Megatron dependencies (optional) =================
16
+ # install apex, set MAX_JOBS to avoid OOMs
17
+ RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
18
+ --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
19
+ git+https://github.com/NVIDIA/apex
20
+ # =============== End of Megatron dependencies (optional) =================
21
+
22
+ RUN pip3 install --no-cache-dir \
23
+ accelerate \
24
+ codetiming \
25
+ datasets \
26
+ dill \
27
+ hydra-core \
28
+ numpy \
29
+ 'pandas' \
30
+ 'peft' \
31
+ 'pyarrow>=15.0.0' \
32
+ 'pybind11' \
33
+ 'pylatexenc' \
34
+ 'ray>=2.10' \
35
+ 'tensordict<0.6' \
36
+ 'transformers' \
37
+ 'vllm==0.6.3.post1' \
38
+ 'wandb'
39
+
40
+ # full dependencies
41
+ RUN pip3 install pytest yapf py-spy pyext liger-kernel
42
+
43
+ # =============== Megatron dependencies (optional) =================
44
+ # install Transformer Engine, which requires FA 2.5.8. Do it in a separate step for docker cache
45
+ RUN MAX_JOBS=4 NINJA_FLAGS="-j4" pip3 install flash-attn==2.5.8 --no-cache-dir --no-build-isolation
46
+ RUN MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 pip3 install git+https://github.com/eric-haibin-lin/TransformerEngine.git@v1.7.0
47
+ # =============== End of Megatron dependencies (optional) =================
deep_search/DeepResearcher/docker/Dockerfile.vemlp.vllm.te ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # docker buildx build --platform linux/x86_64 -t "verlai/verl:$TAG" -f docker/$FILE .
2
+
3
+ # the one in docker.io is an alias for the one veturbo
4
+ # FROM vemlp-cn-beijing.cr.volces.com/veturbo/pytorch:2.4-cu124
5
+ FROM docker.io/haibinlin/verl:v0.0.5-th2.4.0-cu124-base
6
+
7
+ # only config pip index with https://pypi.tuna.tsinghua.edu.cn/simple if needed
8
+ # unset for now
9
+ RUN pip3 config unset global.index-url
10
+
11
+ # transformers 4.47.0 contains the following bug:
12
+ # AttributeError: 'Gemma2Attention' object has no attribute '_flash_attn_uses_top_left_mask'
13
+ RUN pip3 install --no-cache-dir \
14
+ torch==2.4.0 \
15
+ accelerate \
16
+ codetiming \
17
+ dill \
18
+ hydra-core \
19
+ numpy \
20
+ pybind11 \
21
+ tensordict \
22
+ "transformers <= 4.46.0"
23
+
24
+ RUN pip3 install --no-cache-dir flash-attn==2.7.0.post2 --no-build-isolation
25
+
26
+ # vllm depends on ray, and veRL does not support ray > 2.37
27
+ RUN pip3 install --no-cache-dir vllm==0.6.3 ray==2.10
28
+
29
+ # install apex
30
+ RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
31
+ --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
32
+ git+https://github.com/NVIDIA/apex
33
+
34
+ # install Transformer Engine
35
+ # - flash-attn pinned to 2.5.3 by TransformerEngine, switch to eric-haibin-lin/TransformerEngine.git@v1.7.0 to relax version req
36
+ # - install with: MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 to avoid OOM
37
+ # - cudnn is required by TransformerEngine
38
+ # RUN CUDNN_PATH=/opt/conda/lib/python3.11/site-packages/nvidia/cudnn \
39
+ # pip3 install git+https://github.com/eric-haibin-lin/TransformerEngine.git@v1.7.0
40
+ RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install flash-attn==2.5.3 --no-cache-dir --no-build-isolation
41
+ RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install git+https://github.com/NVIDIA/TransformerEngine.git@v1.7
deep_search/DeepResearcher/docs/Makefile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line.
5
+ SPHINXOPTS =
6
+ SPHINXBUILD = sphinx-build
7
+ SPHINXPROJ = verl
8
+ SOURCEDIR = .
9
+ BUILDDIR = _build
10
+
11
+ # Put it first so that "make" without argument is like "make help".
12
+ help:
13
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14
+
15
+ .PHONY: help Makefile
16
+
17
+ # Catch-all target: route all unknown targets to Sphinx using the new
18
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19
+ %: Makefile
20
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
deep_search/DeepResearcher/docs/README.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # verl documents
2
+
3
+ ## Build the docs
4
+
5
+ ```bash
6
+ # Install dependencies.
7
+ pip install -r requirements-docs.txt
8
+
9
+ # Build the docs.
10
+ make clean
11
+ make html
12
+ ```
13
+
14
+ ## Open the docs with your browser
15
+
16
+ ```bash
17
+ python -m http.server -d _build/html/
18
+ ```
19
+ Launch your browser and open localhost:8000.
deep_search/DeepResearcher/docs/README_vllm0.7.md ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Upgrading to vllm >= 0.7
2
+
3
+ ## Installation
4
+
5
+ Note: This version of veRL+vllm 0.7+ supports **FSDP** for training and **vLLM** for rollout.
6
+
7
+ ```
8
+ # Create the conda environment
9
+ conda create -n verl python==3.10
10
+ conda activate verl
11
+
12
+ # Install verl
13
+ git clone https://github.com/volcengine/verl.git
14
+ cd verl
15
+ pip3 install -e .
16
+
17
+ # Install the latest stable version of vLLM
18
+ pip3 install vllm==0.7.3
19
+
20
+ # Install flash-attn
21
+ pip3 install flash-attn --no-build-isolation
22
+
23
+ ```
24
+
25
+ Note that if you are installing lower versions of vLLM (0.7.0, 0.7.1, 0.7.2), you need to make some tiny patches manually on vllm (/path/to/site-packages/vllm after installation) after the above steps:
26
+
27
+ - vllm/distributed/parallel_state.py: Remove the assertion below:
28
+
29
+ ```
30
+ if (world_size
31
+ != tensor_model_parallel_size * pipeline_model_parallel_size):
32
+ raise RuntimeError(
33
+ f"world_size ({world_size}) is not equal to "
34
+ f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
35
+ f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")
36
+
37
+ ```
38
+
39
+ - vllm/executor/uniproc_executor.py: change `local_rank = rank` to `local_rank = int(os.environ["LOCAL_RANK"])`
40
+ - vllm/model_executor/model_loader/weight_utils.py: remove the `torch.cuda.empty_cache()` in `pt_weights_iterator`
41
+
42
+ ## Features
43
+
44
+ ### Use cuda graph
45
+
46
+ After installation, examples using FSDP as training backends can be used. By default, the `enforce_eager` is set to True, which disables the cuda graph. To enjoy cuda graphs and the sleep mode of vLLM>=0.7, add the following lines to the bash script:
47
+
48
+ ```
49
+ actor_rollout_ref.rollout.enforce_eager=False \
50
+ actor_rollout_ref.rollout.free_cache_engine=False \
51
+
52
+ ```
53
+
54
+ For a typical job like examples/ppo_trainer/run_qwen2-7b_seq_balance.sh, the rollout generation time is 115 seconds with vLLM0.6.3, while it is 85 seconds with vLLM0.7.0. By enabling the cudagraph, the generation duration is further reduced to 62 seconds.
55
+
56
+ **Note:** Currently, if the `n` is greater than 1 in `SamplingParams` in vLLM>=0.7, there is a potential performance issue on the stability of rollout generation time (Some iterations would see generation time bursts) using vLLM's V0 Engine.
57
+
58
+ ### Use vLLM V1 Engine
59
+
60
+ Using the vLLM V1 engine can avoid instability issues and achieve additional performance improvements. To use the V1 engine, you can first uninstall the previously installed vLLM and then follow the steps below to install the newer version.
61
+
62
+ ```
63
+ git clone https://github.com/vllm-project/vllm.git
64
+ cd vllm
65
+ git checkout 2275784
66
+ sed -i "903a\ data_parallel_size = world_size // pipeline_model_parallel_size // tensor_model_parallel_size" ./vllm/distributed/parallel_state.py
67
+ VLLM_USE_PRECOMPILED=1 pip install --editable .
68
+ ```
69
+
70
+ Then you can enable the V1 engine by setting `export VLLM_USE_V1=1`. In some benchmark tests, the V1 engine demonstrates a 1.5x speed improvement over the vLLM V0 engine.
71
+ The stable support of the vLLM V1 engine will come soon.
deep_search/DeepResearcher/docs/advance/dpo_extension.rst ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Extend to other RL(HF) algorithms
2
+ =================================
3
+
4
+ We already implemented the complete training pipeline of the PPO
5
+ algorithms. To extend to other algorithms, we analyze the high-level
6
+ principle to use verl and provide a tutorial to implement the DPO
7
+ algorithm. Users can follow a similar paradigm to extend to other RL algorithms.
8
+
9
+ .. note:: **Key ideas**: Single process drives multi-process computation and data communication.
10
+
11
+ Overall Approach
12
+ ----------------
13
+
14
+ Step 1: Consider what multi-machine multi-GPU computations are needed
15
+ for each model, such as ``generate_sequence`` , ``compute_log_prob`` and
16
+ ``update_policy`` in the actor_rollout model. Implement distributed
17
+ single-process-multiple-data (SPMD) computation and encapsulate them
18
+ into APIs
19
+
20
+ Step 2: Based on different distributed scenarios, including FSDP and 3D
21
+ parallelism in Megatron-LM, implement single-process control of data
22
+ interaction among multi-process computations.
23
+
24
+ Step 3: Utilize the encapsulated APIs to implement the control flow
25
+
26
+ Example: Online DPO
27
+ -------------------
28
+
29
+ We use verl to implement a simple online DPO algorithm. The algorithm
30
+ flow of Online DPO is as follows:
31
+
32
+ 1. There is a prompt (rollout) generator which has the same weight as
33
+ the actor model. After a batch of prompts are fed into the generator,
34
+ it generates N responses for each prompt.
35
+ 2. Send all the prompts + responses to a verifier for scoring, which can
36
+ be reward model or a rule-based function. Then sort them in pairs to
37
+ form a training batch.
38
+ 3. Use this training batch to train the actor model using DPO. During
39
+ the process, a reference policy is needed.
40
+
41
+ Step 1: What are the multi-machine multi-GPU computations
42
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
43
+
44
+ **Sample Generator**
45
+
46
+ Implementation details:
47
+
48
+ .. code:: python
49
+
50
+ from verl.single_controller.base import Worker
51
+ from verl.single_controller.ray import RayWorkerGroup, RayClassWithInitArgs, RayResourcePool
52
+ import ray
53
+
54
+ @ray.remote
55
+ class SampleGenerator(Worker):
56
+ def __init__(self, config):
57
+ super().__init__()
58
+ self.config = config
59
+
60
+ def generate_sequences(self, data):
61
+ pass
62
+
63
+ Here, ``SampleGenerator`` can be viewed as a multi-process pulled up by
64
+ ``torchrun``, with each process running the same code (SPMD).
65
+ ``SampleGenerator`` needs to implement a ``generate_sequences`` API for
66
+ the control flow to call. The implementation details inside can use any
67
+ inference engine including vllm, sglang and huggingface. Users can
68
+ largely reuse the code in
69
+ verl/verl/workers/rollout/vllm_rollout/vllm_rollout.py and we won't
70
+ go into details here.
71
+
72
+ **ReferencePolicy inference**
73
+
74
+ API: compute reference log probability
75
+
76
+ .. code:: python
77
+
78
+ from verl.single_controller.base import Worker
79
+ import ray
80
+
81
+ @ray.remote
82
+ class ReferencePolicy(Worker):
83
+ def __init__(self):
84
+ super().__init__()
85
+ self.model = Model()
86
+
87
+ def infer(self, data):
88
+ return self.model(data)
89
+
90
+ **Actor update**
91
+
92
+ API: Update actor model parameters
93
+
94
+ .. code:: python
95
+
96
+ from verl.single_controller.base import Worker
97
+ import ray
98
+
99
+ @ray.remote
100
+ class DPOActor(Worker):
101
+ def __init__(self):
102
+ super().__init__()
103
+ self.model = Model()
104
+ self.model = FSDP(self.model) # or other distributed strategy
105
+ self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)
106
+ self.loss_fn = xxx
107
+
108
+ def update(self, data):
109
+ self.optimizer.zero_grad()
110
+ logits = self.model(data)
111
+ loss = self.loss_fn(logits)
112
+ loss.backward()
113
+ self.optimizer.step()
114
+
115
+ **Notes: How to distinguish between control processes and distributed computation processes**
116
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
117
+
118
+ - Control processes are generally functions directly decorated with
119
+ ``@ray.remote``
120
+ - Computation processes are all wrapped into a ``RayWorkerGroup``.
121
+
122
+ Users can reuse most of the distributed computation logic implemented
123
+ in PPO algorithm, including FSDP and Megatron-LM backend in
124
+ verl/verl/trainer/ppo.
125
+
126
+ Step 2: Based on different distributed scenarios, implement single-process control of multi-process data interaction
127
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
128
+
129
+ **The core problem to solve here is how a single process sends data to
130
+ multiple processes, drives multi-process computation, and how the
131
+ control process obtains the results of multi-process computation.**
132
+ First, we initialize the multi-process ``WorkerGroup`` in the control
133
+ process.
134
+
135
+ .. code:: python
136
+
137
+ @ray.remote(num_cpus=1)
138
+ def main_task(config):
139
+ # construct SampleGenerator
140
+ resource_pool = RayResourcePool(process_on_nodes=[8] * 2) # 16 GPUs
141
+ ray_cls = RayClassWithInitArgs(SampleGenerator, config=config)
142
+ # put SampleGenerator onto resource pool
143
+ worker_group = RayWorkerGroup(resource_pool, ray_cls)
144
+
145
+ # construct reference policy
146
+
147
+ As we can see, in the control process, multiple processes are wrapped
148
+ into a ``RayWorkerGroup``. Inside this ``WorkerGroup``, there is a
149
+ ``self._workers`` member, where each worker is a RayActor
150
+ (https://docs.ray.io/en/latest/ray-core/actors.html) of SampleGenerator.
151
+ ray_trainer.md also provides an implementation of
152
+ ``MegatronRayWorkerGroup``.
153
+
154
+ Assuming the model is distributed using FSDP, and there is a batch of
155
+ data on the control process, for data parallelism, the underlying
156
+ calling process is:
157
+
158
+ .. code:: python
159
+
160
+ data = xxx
161
+ data_list = data.chunk(dp_size)
162
+
163
+ output = []
164
+ for d in data_list:
165
+ # worker_group._workers[i] is a SampleGenerator
166
+ output.append(worker_group._workers[i].generate_sequences.remote(d))
167
+
168
+ output = ray.get(output)
169
+ output = torch.cat(output)
170
+
171
+ Single process calling multiple processes involves the following 3
172
+ steps:
173
+
174
+ 1. Split the data into DP parts on the control process.
175
+ 2. Send the data to remote, call the remote computation through RPC, and
176
+ utilize multi-process computation.
177
+ 3. Obtain the computation results of each worker on the control process
178
+ and merge them.
179
+
180
+ Frequently calling these 3 steps on the controller process greatly hurts
181
+ code readability. **In verl, we have abstracted and encapsulated these 3
182
+ steps, so that the worker's method + dispatch + collect can be
183
+ registered into the worker_group**
184
+
185
+ .. code:: python
186
+
187
+ from verl.single_controller.base.decorator import register
188
+
189
+ def dispatch_data(worker_group, data):
190
+ return data.chunk(worker_group.world_size)
191
+
192
+ def collect_data(worker_group, data):
193
+ return torch.cat(data)
194
+
195
+ dispatch_mode = {
196
+ 'dispatch_fn': dispatch_data,
197
+ 'collect_fn': collect_data
198
+ }
199
+
200
+ @register(dispatch_mode=dispatch_mode)
201
+ def generate_sequences(self, data):
202
+ pass
203
+
204
+ In this way, we can directly call the method inside the worker through
205
+ the ``worker_group`` on the control (driver) process (which is a single
206
+ process):
207
+
208
+ .. code:: python
209
+
210
+ output = worker_group.generate_sequences(data)
211
+
212
+ This single line includes data splitting, data distribution and
213
+ computation, and data collection.
214
+
215
+ Furthermore, the model parallelism size of each model is usually fixed,
216
+ including dp, tp, pp. So for these common distributed scenarios, we have
217
+ pre-implemented specific dispatch and collect methods, in `decorator.py <https://github.com/volcengine/verl/blob/main/verl/single_controller/base/decorator.py>`_, which can be directly used to wrap the computations.
218
+
219
+ .. code:: python
220
+
221
+ from verl.single_controller.base.decorator import register, Dispatch
222
+
223
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
224
+ def generate_sequences(self, data: DataProto) -> DataProto:
225
+ pass
226
+
227
+ Here it requires the data interface to be ``DataProto``. Definition of
228
+ ``DataProto`` is in `protocol.py <https://github.com/volcengine/verl/blob/main/verl/protocol.py>`_.
229
+
230
+ Step 3: Main training loop
231
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
232
+
233
+ With the above training flows, we can implement the algorithm's control
234
+ flow. It is recommended that ``main_task`` is also a ray remote process.
235
+
236
+ .. code:: python
237
+
238
+ @ray.remote(num_cpus=1)
239
+ def main_task(config):
240
+ # construct SampleGenerator
241
+ resource_pool = RayResourcePool(process_on_nodes=[8] * 2) # 16 GPUs
242
+ ray_cls = RayClassWithInitArgs(SampleGenerator, config=config)
243
+ # put SampleGenerator onto resource pool
244
+ sample_gen = RayWorkerGroup(resource_pool, ray_cls)
245
+
246
+ # construct reference policy
247
+ ray_cls = RayClassWithInitArgs(ReferencePolicy)
248
+ ref_policy = RayWorkerGroup(resource_pool, ray_cls)
249
+
250
+ # construct actor
251
+ ray_cls = RayClassWithInitArgs(DPOActor)
252
+ dpo_policy = RayWorkerGroup(resource_pool, ray_cls)
253
+
254
+ dataloader = DataLoader()
255
+
256
+ for data in dataloader:
257
+ # generate data
258
+ data = sample_gen.generate_sequences(data)
259
+ # generate scores for each data
260
+ data = generate_scores(data)
261
+ # generate pairwise data using scores
262
+ data = generate_pairwise_data(data)
263
+ # generate ref_log_prob
264
+ data.batch['ref_log_prob'] = ref_policy.infer(data)
265
+ # update using dpo
266
+ dpo_policy.update(data)
267
+ # logging
268
+
269
+ Here, different ``WorkerGroups`` can be placed in the same resource pool or
270
+ in different resource pools using ``create_colocated_worker_cls``
271
+ similar as in `ray_trainer.py <https://github.com/volcengine/verl/blob/main/verl/trainer/ppo/ray_trainer.py>`_.
deep_search/DeepResearcher/docs/advance/fsdp_extension.rst ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Add models with the FSDP backend
3
+ ==================================
4
+
5
+ Model
6
+ --------------------------
7
+
8
+ In principle, our FSDP backend can support any HF model and we can
9
+ synchronize the actor model weight with vLLM using `hf_weight_loader.py <https://github.com/volcengine/verl/blob/main/verl/third_party/vllm/vllm_v_0_6_3/hf_weight_loader.py>`_.
10
+ However, ``hf_weight_loader`` will gather the full state_dict of a
11
+ model during synchronization, which may cause OOM. We suggest using
12
+ ``dtensor_weight_loader`` which gathers the full model parameters layer by
13
+ layer to reduce the peak memory usage. We already support dtensor weight
14
+ loader for the models below in `dtensor_weight_loader.py <https://github.com/volcengine/verl/blob/main/verl/third_party/vllm/vllm_v_0_5_4/dtensor_weight_loaders.py>`_.:
15
+
16
+ - ``GPT2LMHeadModel``
17
+ - ``LlamaForCausalLM``
18
+ - ``LLaMAForCausalLM``
19
+ - ``MistralForCausalLM``
20
+ - ``InternLMForCausalLM``
21
+ - ``AquilaModel``
22
+ - ``AquilaForCausalLM``
23
+ - ``Phi3ForCausalLM``
24
+ - ``GemmaForCausalLM``
25
+ - ``Gemma2ForCausalLM``
26
+ - ``GPTBigCodeForCausalLM``
27
+ - ``Starcoder2ForCausalLM``
28
+ - ``Qwen2ForCausalLM``
29
+ - ``DeepseekV2ForCausalLM``
30
+
31
+ To implement ``dtensor_weight_loader`` of a model that's supported in
32
+ vLLM, follow the guide of gemma model below:
33
+
34
+ 1. Copy the
35
+ ``load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]])`` from the vllm model class
36
+ to ``dtensor_weight_loaders.py``
37
+ 2. Modify the arguments to
38
+ ``(actor_weights: Dict, vllm_model: nn.Module)``
39
+ 3. Replace the ``self`` to ``vllm_model``
40
+ 4. Add the
41
+ ``local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)``
42
+ before each ``param = params_dict[name]`` and modify the following
43
+ weight loading using ``local_loaded_weight``.
44
+ 5. Register the implemented dtensor weight loader to ``__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__``.
45
+
46
+ .. code-block:: diff
47
+
48
+ - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
49
+ + def gemma_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
50
+ stacked_params_mapping = [
51
+ # (param_name, shard_name, shard_id)
52
+ ("qkv_proj", "q_proj", "q"),
53
+ ("qkv_proj", "k_proj", "k"),
54
+ ("qkv_proj", "v_proj", "v"),
55
+ ("gate_up_proj", "gate_proj", 0),
56
+ ("gate_up_proj", "up_proj", 1),
57
+ ]
58
+ - params_dict = dict(self.named_parameters())
59
+ + params_dict = dict(vllm_model.named_parameters())
60
+ loaded_params = set()
61
+ - for name, loaded_weight in weights:
62
+ + for name, loaded_weight in actor_weights.items():
63
+ for (param_name, shard_name, shard_id) in stacked_params_mapping:
64
+ if shard_name not in name:
65
+ continue
66
+ name = name.replace(shard_name, param_name)
67
+ # Skip loading extra bias for GPTQ models.
68
+ if name.endswith(".bias") and name not in params_dict:
69
+ continue
70
+ + local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
71
+ param = params_dict[name]
72
+ weight_loader = param.weight_loader
73
+ - weight_loader(param, loaded_weight, shard_id)
74
+ + weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
75
+ break
76
+ else:
77
+ # lm_head is not used in vllm as it is tied with embed_token.
78
+ # To prevent errors, skip loading lm_head.weight.
79
+ if "lm_head.weight" in name:
80
+ continue
81
+ # Skip loading extra bias for GPTQ models.
82
+ if name.endswith(".bias") and name not in params_dict:
83
+ continue
84
+ + local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
85
+ param = params_dict[name]
86
+ weight_loader = getattr(param, "weight_loader",
87
+ default_weight_loader)
88
+ - weight_loader(param, loaded_weight)
89
+ + weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
90
+ loaded_params.add(name)
91
+ unloaded_params = params_dict.keys() - loaded_params
92
+ if unloaded_params:
93
+ raise RuntimeError(
94
+ "Some weights are not initialized from checkpoints: "
95
+ f"{unloaded_params}")
deep_search/DeepResearcher/docs/advance/megatron_extension.rst ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Add models with the Megatron-LM backend
2
+ =========================================
3
+
4
+ Model
5
+ -----------
6
+
7
+ The most challenging aspect to use the Megatron-LM backend is implementing
8
+ the models for training. Currently, we implement Llama model that
9
+ supports data parallelism, tensor parallelism, pipeline parallelism (also
10
+ vPP) and sequence parallelism. We also implement remove padding (sequence packing) on Llama
11
+ model, which can be found in `modeling_llama_megatron.py <https://github.com/volcengine/verl/blob/main/verl/models/llama/megatron/modeling_llama_megatron.py>`_.
12
+
13
+ To support other models, users are required to implement:
14
+
15
+ 1. Implement a model similar to ``modeling_llama_megatron.py`` that satisfies the
16
+ parallelism requirements of Megatron-LM. Then register your model in
17
+ the `registry.py <https://github.com/volcengine/verl/blob/main/verl/models/registry.py>`_.
18
+ 2. Checkpoint utils that can load full checkpoint (e.g. huggingface
19
+ checkpoint) to partitioned models during the runtime. Then register
20
+ your loader to ``weight_loader_registry`` in `weight_loader_registry.py <https://github.com/volcengine/verl/blob/main/verl/models/weight_loader_registry.py>`_.
21
+ 3. Weight loader that synchronize the weight from Megatron to rollout
22
+ (vLLM) model. Note that both the actor model and rollout model are
23
+ partitioned during runtime. So, it's advisable to map the model name
24
+ in actor model implementation. Otherwise, you may need an additional
25
+ name mapping and even weight transformation. The weight loader implementation
26
+ is in `megatron_weight_loaders.py <https://github.com/volcengine/verl/blob/main/verl/third_party/vllm/vllm_v_0_6_3/megatron_weight_loaders.py>`_.
deep_search/DeepResearcher/docs/advance/placement.rst ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Ray API Design Tutorial
2
+ =======================================
3
+
4
+ We provide a tutorial for our Ray API design, including:
5
+
6
+ - Ray basic concepts
7
+ - Resource Pool and RayWorkerGroup
8
+ - Data Dispatch, Execution and Collection
9
+ - Initialize the RayWorkerGroup and execute the distributed computation in the given Resource Pool
10
+
11
+ See details in `tutorial.ipynb <https://github.com/volcengine/verl/blob/main/examples/ray/tutorial.ipynb>`_.
deep_search/DeepResearcher/docs/conf.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Configuration file for the Sphinx documentation builder.
16
+ #
17
+ # This file only contains a selection of the most common options. For a full
18
+ # list see the documentation:
19
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
20
+
21
+ # -- Path setup --------------------------------------------------------------
22
+
23
+ # If extensions (or modules to document with autodoc) are in another directory,
24
+ # add these directories to sys.path here. If the directory is relative to the
25
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
26
+ #
27
+ # import os
28
+ # import sys
29
+ # sys.path.insert(0, os.path.abspath('.'))
30
+
31
+
32
+ # -- Project information -----------------------------------------------------
33
+
34
+ project = u'verl'
35
+ # pylint: disable=W0622
36
+ copyright = u'2024 ByteDance Seed Foundation MLSys Team'
37
+ author = u'Guangming Sheng, Chi Zhang, Yanghua Peng, Haibin Lin'
38
+
39
+
40
+ # -- General configuration ---------------------------------------------------
41
+ # The master toctree document.
42
+ master_doc = 'index'
43
+
44
+ # Add any Sphinx extension module names here, as strings. They can be
45
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
46
+ # ones.
47
+ extensions = ['recommonmark',
48
+ 'sphinx.ext.autodoc',
49
+ 'sphinx.ext.autosummary',
50
+ 'sphinx.ext.autosectionlabel',
51
+ ]
52
+
53
+ # The suffix(es) of source filenames.
54
+ # You can specify multiple suffix as a list of string:
55
+ source_suffix = ['.rst', 'rest', '.md']
56
+
57
+ # Add any paths that contain templates here, relative to this directory.
58
+ templates_path = ['_templates']
59
+
60
+ # The language for content autogenerated by Sphinx. Refer to documentation
61
+ # for a list of supported languages.
62
+ #
63
+ # This is also used if you do content translation via gettext catalogs.
64
+ # Usually you set "language" from the command line for these cases.
65
+ language = u'en'
66
+
67
+ # List of patterns, relative to source directory, that match files and
68
+ # directories to ignore when looking for source files.
69
+ # This pattern also affects html_static_path and html_extra_path.
70
+ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
71
+
72
+
73
+ # -- Options for HTML output -------------------------------------------------
74
+
75
+ # The theme to use for HTML and HTML Help pages. See the documentation for
76
+ # a list of builtin themes.
77
+ #
78
+ html_theme = 'sphinx_rtd_theme'
79
+
80
+ # Add any paths that contain custom static files (such as style sheets) here,
81
+ # relative to this directory. They are copied after the builtin static files,
82
+ # so a file named "default.css" will overwrite the builtin "default.css".
83
+ html_static_path = ['_static']
deep_search/DeepResearcher/docs/data.rst ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Data interface
2
+ =========================
3
+
4
+ DataProto is the interface for data exchange.
5
+
6
+ The :class:`verl.DataProto` class contains two key members:
7
+
8
+ - batch: a :class:`tensordict.TensorDict` object for the actual data
9
+ - meta_info: a :class:`Dict` with additional meta information
10
+
11
+ TensorDict
12
+ ~~~~~~~~~~~~
13
+
14
+ :attr:`DataProto.batch` is built on top of :class:`tensordict`, a project in the PyTorch ecosystem.
15
+ A TensorDict is a dict-like container for tensors. To instantiate a TensorDict, you must specify key-value pairs as well as the batch size.
16
+
17
+ .. code-block:: python
18
+
19
+ >>> import torch
20
+ >>> from tensordict import TensorDict
21
+ >>> tensordict = TensorDict({"zeros": torch.zeros(2, 3, 4), "ones": torch.ones(2, 3, 5)}, batch_size=[2,])
22
+ >>> tensordict["twos"] = 2 * torch.ones(2, 5, 6)
23
+ >>> zeros = tensordict["zeros"]
24
+ >>> tensordict
25
+ TensorDict(
26
+ fields={
27
+ ones: Tensor(shape=torch.Size([2, 3, 5]), device=cpu, dtype=torch.float32, is_shared=False),
28
+ twos: Tensor(shape=torch.Size([2, 5, 6]), device=cpu, dtype=torch.float32, is_shared=False),
29
+ zeros: Tensor(shape=torch.Size([2, 3, 4]), device=cpu, dtype=torch.float32, is_shared=False)},
30
+ batch_size=torch.Size([2]),
31
+ device=None,
32
+ is_shared=False)
33
+
34
+ One can also index a tensordict along its batch_size. The contents of the TensorDict can be manipulated collectively as well.
35
+
36
+ .. code-block:: python
37
+
38
+ >>> tensordict[..., :1]
39
+ TensorDict(
40
+ fields={
41
+ ones: Tensor(shape=torch.Size([1, 3, 5]), device=cpu, dtype=torch.float32, is_shared=False),
42
+ twos: Tensor(shape=torch.Size([1, 5, 6]), device=cpu, dtype=torch.float32, is_shared=False),
43
+ zeros: Tensor(shape=torch.Size([1, 3, 4]), device=cpu, dtype=torch.float32, is_shared=False)},
44
+ batch_size=torch.Size([1]),
45
+ device=None,
46
+ is_shared=False)
47
+ >>> tensordict = tensordict.to("cuda:0")
48
+ >>> tensordict = tensordict.reshape(6)
49
+
50
+ For more about :class:`tensordict.TensorDict` usage, see the official tensordict_ documentation.
51
+
52
+ .. _tensordict: https://pytorch.org/tensordict/overview.html
53
+
54
+
55
+ Core APIs
56
+ ~~~~~~~~~~~~~~~~~
57
+
58
+ .. autoclass:: verl.DataProto
59
+ :members: to, select, union, make_iterator, concat
deep_search/DeepResearcher/docs/examples/config.rst ADDED
@@ -0,0 +1,361 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _config-explain-page:
2
+
3
+ Config Explanation
4
+ ===================
5
+
6
+ ppo_trainer.yaml for FSDP Backend
7
+ ---------------------------------
8
+
9
+ Data
10
+ ~~~~
11
+
12
+ .. code:: yaml
13
+
14
+ data:
15
+ tokenizer: null
16
+ train_files: ~/data/rlhf/gsm8k/train.parquet
17
+ val_files: ~/data/rlhf/gsm8k/test.parquet
18
+ prompt_key: prompt
19
+ max_prompt_length: 512
20
+ max_response_length: 512
21
+ train_batch_size: 1024
22
+ return_raw_input_ids: False # This should be set to true when the tokenizer between policy and rm differs
23
+ return_raw_chat: False
24
+
25
+ - ``data.train_files``: Training set parquet. Can be a list or a single
26
+ file. The program will read all files into memory, so it can't be too
27
+ large (< 100GB). The path can be either local path or HDFS path. For
28
+ HDFS path, we provide utils to download it to DRAM and convert the
29
+ HDFS path to local path.
30
+ - ``data.val_files``: Validation parquet. Can be a list or a single
31
+ file.
32
+ - ``data.prompt_key``: The field in the dataset where the prompt is
33
+ located. Default is 'prompt'.
34
+ - ``data.max_prompt_length``: Maximum prompt length. All prompts will be
35
+ left-padded to this length. An error will be reported if the length is
36
+ too long
37
+ - ``data.max_response_length``: Maximum response length. Rollout in RL
38
+ algorithms (e.g. PPO) generates up to this length
39
+ - ``data.train_batch_size``: Batch size sampled for one training
40
+ iteration of different RL algorithms.
41
+ - ``data.return_raw_input_ids``: Whether to return the original
42
+ input_ids without adding chat template. This is mainly used to
43
+ accommodate situations where the reward model's chat template differs
44
+ from the policy. It needs to be decoded first, then apply the RM's
45
+ chat template. If using a model-based RM, and the policy and RM
46
+ chat_templates are different, this flag needs to be set
47
+ - ``data.return_raw_chat``:
48
+ - ``data.truncation``: Truncate the input_ids or prompt length if they
49
+ exceed max_prompt_length. Default is 'error', not allow exceed the
50
+ max_prompt_length. The users should increase the max_prompt_length if
51
+ throwing the error.
52
+
53
+ Actor/Rollout/Reference Policy
54
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
55
+
56
+ .. code:: yaml
57
+
58
+ actor_rollout_ref:
59
+ hybrid_engine: True
60
+ model:
61
+ path: ~/models/deepseek-llm-7b-chat
62
+ external_lib: null
63
+ override_config: { }
64
+ enable_gradient_checkpointing: False
65
+ use_remove_padding: False
66
+ actor:
67
+ strategy: fsdp # This is for backward-compatibility
68
+ ppo_mini_batch_size: 256
69
+ ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu
70
+ ppo_micro_batch_size_per_gpu: 8
71
+ use_dynamic_bsz: False
72
+ ppo_max_token_len_per_gpu: 16384 # n * ${data.max_prompt_length} + ${data.max_response_length}
73
+ grad_clip: 1.0
74
+ clip_ratio: 0.2
75
+ entropy_coeff: 0.001
76
+ use_kl_loss: False # True for GRPO
77
+ kl_loss_coef: 0.001 # for grpo
78
+ kl_loss_type: low_var_kl # for grpo
79
+ ppo_epochs: 1
80
+ shuffle: False
81
+ ulysses_sequence_parallel_size: 1 # sp size
82
+ optim:
83
+ lr: 1e-6
84
+ lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime
85
+ min_lr_ratio: null # only useful for warmup with cosine
86
+ warmup_style: constant # select from constant/cosine
87
+ total_training_steps: -1 # must be override by program
88
+ fsdp_config:
89
+ wrap_policy:
90
+ # transformer_layer_cls_to_wrap: None
91
+ min_num_params: 0
92
+ param_offload: False
93
+ optimizer_offload: False
94
+ fsdp_size: -1
95
+ ref:
96
+ fsdp_config:
97
+ param_offload: False
98
+ wrap_policy:
99
+ # transformer_layer_cls_to_wrap: None
100
+ min_num_params: 0
101
+ log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
102
+ log_prob_micro_batch_size_per_gpu: 16
103
+ log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
104
+ log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}
105
+ ulysses_sequence_parallel_size: ${actor_rollout_ref.actor.ulysses_sequence_parallel_size} # sp size
106
+ rollout:
107
+ name: vllm
108
+ temperature: 1.0
109
+ top_k: -1 # 0 for hf rollout, -1 for vllm rollout
110
+ top_p: 1
111
+ prompt_length: ${data.max_prompt_length} # not use for opensource
112
+ response_length: ${data.max_response_length}
113
+ # for vllm rollout
114
+ dtype: bfloat16 # should align with FSDP
115
+ gpu_memory_utilization: 0.5
116
+ ignore_eos: False
117
+ enforce_eager: True
118
+ free_cache_engine: True
119
+ load_format: dummy_dtensor
120
+ tensor_model_parallel_size: 2
121
+ max_num_batched_tokens: 8192
122
+ max_num_seqs: 1024
123
+ log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
124
+ log_prob_micro_batch_size_per_gpu: 16
125
+ log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
126
+ log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}
127
+ # for hf rollout
128
+ do_sample: True
129
+ # number of responses (i.e. num sample times)
130
+ n: 1 # > 1 for grpo, rloo
131
+
132
+ **Common config for actor, rollout and reference model**
133
+
134
+ - ``actor_rollout_ref.hybrid_engine``: Whether it's a hybrid engine,
135
+ currently only supports hybrid engine
136
+ - ``actor_rollout_ref.model.path``: Huggingface model path. This can be
137
+ either local path or HDFS path. For HDFS path, we provide utils to
138
+ download it to DRAM and convert the HDFS path to local path.
139
+ - ``actor_rollout_ref.model.external_libs``: Additional Python packages
140
+ that need to be imported. Used to register models or tokenizers into
141
+ the Huggingface system.
142
+ - ``actor_rollout_ref.model.override_config``: Used to override some of
143
+ the model's original configurations, mainly dropout
144
+ - ``actor_rollout_ref.model.enable_gradient_checkpointing``: Whether to
145
+ enable gradient checkpointing for the actor
146
+
147
+ **Actor model**
148
+
149
+ - ``actor_rollout_ref.actor.strategy``: fsdp or megatron. In this
150
+ example, we use fsdp backend.
151
+
152
+ - ``actor_rollout_ref.actor.ppo_mini_batch_size``: One sample is split
153
+ into multiple sub-batches with batch_size=ppo_mini_batch_size for PPO
154
+ updates. The ppo_mini_batch_size is a global num across all workers/gpus
155
+
156
+ - ``actor_rollout_ref.actor.ppo_micro_batch_size``: [Will be deprecated, use ppo_micro_batch_size_per_gpu]
157
+ Similar to gradient accumulation, the micro_batch_size_per_gpu for one forward pass,
158
+ trading speed for GPU memory. The value represents the global view.
159
+
160
+ - ``actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu``: Similar to gradient
161
+ accumulation, the micro_batch_size_per_gpu for one forward pass, trading speed
162
+ for GPU memory. The value represents the local num per gpu.
163
+
164
+ - ``actor_rollout_ref.actor.grad_clip``: Gradient clipping for actor
165
+ updates
166
+
167
+ - ``actor_rollout_ref.actor.clip_ratio``: PPO clip ratio
168
+
169
+ - ``actor_rollout_ref.actor.entropy_coeff``: The weight of entropy when
170
+ calculating PPO loss
171
+
172
+ - ``actor_rollout_ref.actor.ppo_epochs``: Number of epochs for PPO
173
+ updates on one set of sampled data
174
+
175
+ - ``actor_rollout_ref.actor.shuffle``: Whether to shuffle data when
176
+ there are multiple epochs
177
+
178
+ - ``actor_rollout_ref.actor.optim``: Actor's optimizer parameters
179
+
180
+ - ``actor_rollout_ref.actor.fsdp_config``: FSDP config for actor
181
+ training
182
+
183
+ - ``wrap_policy``: FSDP wrap policy. By default, it uses Huggingface's
184
+ wrap policy, i.e., wrapping by DecoderLayer
185
+
186
+ - No need to set transformer_layer_cls_to_wrap, so we comment it.
187
+
188
+ - ``*_offload``: Whether to enable parameter, gradient and optimizer
189
+ offload
190
+
191
+ - Trading speed for GPU memory.
192
+
193
+ **Reference Model**
194
+
195
+ - ``actor_rollout_ref.ref``: FSDP config same as actor. **For models
196
+ larger than 7B, it's recommended to turn on offload for ref by
197
+ default**
198
+
199
+ - ``actor_rollout_ref.ref.log_prob_micro_batch_size``: [Will be deprecated, use log_prob_micro_batch_size_per_gpu]
200
+ The batch size for one forward pass in the computation of ``ref_log_prob``. The value represents the global num.
201
+
202
+ - ``actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu``: The batch size
203
+ for one forward pass in the computation of ``ref_log_prob``. The value represents the local num per gpu.
204
+
205
+ **Rollout Model**
206
+
207
+ - ``actor_rollout_ref.rollout.name``: hf/vllm. We use vLLM by default
208
+ because it's much more efficient and our hybrid engine is implemented with
209
+ vLLM.
210
+
211
+ - Rollout (Auto-regressive) parameters. The key should be equal to the
212
+ property name in vLLM's ``SamplingParams``.
213
+
214
+ - ``temperature``, ``top_k``, ``top_p`` and others: Sampling
215
+ parameters in ``SamplingParams``.
216
+
217
+ - ``dtype``: Rollout model parameters type. This should be aligned with
218
+ the actor model parameter type in FSDP/Megatron backend.
219
+
220
+ - ``gpu_memory_utilization``: The proportion of the remaining GPU memory
221
+ allocated for kv cache after other models have initialized when using
222
+ vllm.
223
+
224
+ - ``tensor_model_parallel_size``: TP size for rollout. Only effective
225
+ for vllm.
226
+
227
+ - ``actor_rollout_ref.rollout.log_prob_micro_batch_size``: [Will be deprecated, use log_prob_micro_batch_size_per_gpu]
228
+ The batch size for one forward pass in the computation of ``log_prob``. The value represents the global num.
229
+
230
+ - ``log_prob_micro_batch_size_per_gpu``: Micro batch size per gpu (The batch size for
231
+ one forward pass) for recalculating ``log_prob``. The value represents the local num per gpu.
232
+
233
+ - ``do_sample``: Whether to sample. If set to False, the rollout model
234
+ will perform greedy sampling. We disable ``do_sample`` during
235
+ validation.
236
+
237
+ - ``actor_rollout_ref.rollout.ignore_eos``: Whether to ignore the EOS
238
+ token and continue generating tokens after the EOS token is generated.
239
+
240
+ - ``actor_rollout_ref.rollout.free_cache_engine``: Offload the KVCache
241
+ after rollout generation stage. Default is True. When set to True, we
242
+ need to disable the usage of CUDAGraph (set ``enforce_eager`` to
243
+ True.)
244
+
245
+ - ``actor_rollout_ref.rollout.enforce_eager``: Whether to use CUDAGraph
246
+ in vLLM generation. Default set to True to disable CUDAGraph.
247
+
248
+ - ``actor_rollout_ref.rollout.load_format``: Which weight loader to use
249
+ to load the actor model weights to the rollout model.
250
+
251
+ - ``auto``: Use Megatron weight loader.
252
+ - ``megatron``: Use Megatron weight loader. Deployed with Megatron
253
+ backend. The input model ``state_dict()`` is already partitioned
254
+ along TP dimension and already gathered along PP dimension. This
255
+ weight loader requires that the Rollout model and Actor model's
256
+ parameters shape and name should be identical.
257
+ - ``dtensor``: Default solution when using Huggingface weight loader.
258
+ Deployed with FSDP backend and the state_dict_type is
259
+ ``StateDictType.SHARDED_STATE_DICT``. Recommend to use this weight
260
+ loader
261
+ - ``hf``: Use Huggingface weight loader. Deployed with FSDP backend
262
+ and the state_dict_type is ``StateDictType.FULL_STATE_DICT``. This
263
+ solution doesn't need to rewrite the weight loader for each model
264
+ implemented in vLLM but it results in larger peak memory usage.
265
+ - ``dummy_hf``, ``dummy_megatron``, ``dummy_dtensor``: Random
266
+ initialization.
267
+
268
+ .. note:: **NOTED**: In this config field, users only need to select from ``dummy_megatron``, ``dummy_dtensor``, ``dummy_hf`` for rollout initialization and our hybrid engine will select the corresponding weight loader (i.e., ``megatron``, ``dtensor``, ``hf``) during actor/rollout weight synchronization.
269
+
270
+ Critic Model
271
+ ~~~~~~~~~~~~
272
+
273
+ Most parameters for Critic are similar to Actor Model.
274
+
275
+ Reward Model
276
+ ~~~~~~~~~~~~
277
+
278
+ .. code:: yaml
279
+
280
+ reward_model:
281
+ enable: False
282
+ model:
283
+ input_tokenizer: ${actor_rollout_ref.model.path} # set this to null if the chat template is identical
284
+ path: ~/models/Anomy-RM-v0.1
285
+ external_lib: ${actor_rollout_ref.model.external_lib}
286
+ fsdp_config:
287
+ min_num_params: 0
288
+ param_offload: False
289
+ micro_batch_size_per_gpu: 16
290
+ max_length: null
291
+ reward_manager: naive
292
+
293
+ - ``reward_model.enable``: Whether to enable reward model. If False, we
294
+ compute the reward only with the user-defined reward functions. In
295
+ GSM8K and Math examples, we disable reward model. For RLHF alignment
296
+ example using full_hh_rlhf, we utilize reward model to assess the
297
+ responses. If False, the following parameters are not effective.
298
+ - ``reward_model.model``
299
+
300
+ - ``input_tokenizer``: Input tokenizer. If the reward model's chat
301
+ template is inconsistent with the policy, we need to first decode to
302
+ plaintext, then apply the rm's chat_template. Then score with RM. If
303
+ chat_templates are consistent, it can be set to null.
304
+ - ``path``: RM's HDFS path or local path. Note that RM only supports
305
+ AutoModelForSequenceClassification. Other model types need to define
306
+ their own RewardModelWorker and pass it from the code.
307
+ - ``reward_model.reward_manager``: Reward Manager. This defines the mechanism
308
+ of computing rule-based reward and handling different reward sources. Default
309
+ is ``naive``. If all verification functions are multiprocessing-safe, the reward
310
+ manager can be set to ``prime`` for parallel verification.
311
+
312
+ Algorithm
313
+ ~~~~~~~~~
314
+
315
+ .. code:: yaml
316
+
317
+ algorithm:
318
+ gamma: 1.0
319
+ lam: 1.0
320
+ adv_estimator: gae
321
+ kl_penalty: kl # how to estimate kl divergence
322
+ kl_ctrl:
323
+ type: fixed
324
+ kl_coef: 0.005
325
+
326
+ - ``gamma``: discount factor
327
+ - ``lam``: Trade-off between bias and variance in the GAE estimator
328
+ - ``adv_estimator``: Support ``gae``, ``grpo``, ``reinforce_plus_plus``, ``rloo``
329
+ - ``kl_penalty``: Support ``kl``, ``abs``, ``mse`` and ``full``. How to
330
+ calculate the kl divergence between actor and reference policy. For
331
+ specific options, refer to `core_algos.py <https://github.com/volcengine/verl/blob/main/verl/trainer/ppo/core_algos.py#L192>`_ .
332
+
333
+ Trainer
334
+ ~~~~~~~
335
+
336
+ .. code:: yaml
337
+
338
+ trainer:
339
+ total_epochs: 30
340
+ project_name: verl_examples
341
+ experiment_name: gsm8k
342
+ logger: ['console', 'wandb']
343
+ nnodes: 1
344
+ n_gpus_per_node: 8
345
+ save_freq: -1
346
+ test_freq: 2
347
+ critic_warmup: 0
348
+ default_hdfs_dir: ~/experiments/gsm8k/ppo/${trainer.experiment_name} # hdfs checkpoint path
349
+ default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} # local checkpoint path
350
+
351
+ - ``trainer.total_epochs``: Number of epochs in training.
352
+ - ``trainer.project_name``: For wandb
353
+ - ``trainer.experiment_name``: For wandb
354
+ - ``trainer.logger``: Support console and wandb
355
+ - ``trainer.nnodes``: Number of nodes used in the training.
356
+ - ``trainer.n_gpus_per_node``: Number of GPUs per node.
357
+ - ``trainer.save_freq``: The frequency (by iteration) to save checkpoint
358
+ of the actor and critic model.
359
+ - ``trainer.test_freq``: The validation frequency (by iteration).
360
+ - ``trainer.critic_warmup``: The number of iteration to train the critic
361
+ model before actual policy learning.
deep_search/DeepResearcher/docs/examples/gsm8k_example.rst ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ GSM8K Example
2
+ =============
3
+
4
+ Introduction
5
+ ------------
6
+
7
+ In this example, we train an LLM to tackle the GSM8k task.
8
+
9
+ Paper: https://arxiv.org/pdf/2110.14168
10
+
11
+ Dataset: https://huggingface.co/datasets/gsm8k
12
+
13
+ Note that the original paper mainly focuses on training a verifier (a
14
+ reward model) to solve math problems via Best-of-N sampling. In this
15
+ example, we train an RLHF agent using a rule-based reward model.
16
+
17
+ Dataset Introduction
18
+ --------------------
19
+
20
+ GSM8k is a math problem dataset. The prompt is an elementary school
21
+ problem. The LLM model is required to answer the math problem.
22
+
23
+ The training set contains 7473 samples and the test set contains 1319
24
+ samples.
25
+
26
+ **An example**
27
+
28
+ Prompt
29
+
30
+ Katy makes coffee using teaspoons of sugar and cups of water in the
31
+ ratio of 7:13. If she used a total of 120 teaspoons of sugar and cups
32
+ of water, calculate the number of teaspoonfuls of sugar she used.
33
+
34
+ Solution
35
+
36
+ The total ratio representing the ingredients she used to make the
37
+ coffee is 7+13 = <<7+13=20>>20 Since the fraction representing the
38
+ number of teaspoons she used is 7/20, she used 7/20\ *120 =
39
+ <<7/20*\ 120=42>>42 #### 42
40
+
41
+ Step 1: Prepare dataset
42
+ -----------------------
43
+
44
+ .. code:: bash
45
+
46
+ cd examples/data_preprocess
47
+ python3 gsm8k.py --local_dir ~/data/gsm8k
48
+
49
+ Step 2: Download Model
50
+ ----------------------
51
+
52
+ There're three ways to prepare the model checkpoints for post-training:
53
+
54
+ - Download the required models from hugging face
55
+
56
+ .. code:: bash
57
+
58
+ huggingface-cli download deepseek-ai/deepseek-math-7b-instruct --local-dir ~/models/deepseek-math-7b-instruct --local-dir-use-symlinks False
59
+
60
+ - Already have your model stored in a local directory or HDFS path.
61
+ - Also, you can directly use the model name in huggingface (e.g.,
62
+ deepseek-ai/deepseek-math-7b-instruct) in
63
+ ``actor_rollout_ref.model.path`` and ``critic.model.path`` field in
64
+ the run script.
65
+
66
+ Note that users should prepare checkpoints for actor, critic and reward
67
+ model.
68
+
69
+ [Optional] Step 3: SFT your Model
70
+ ---------------------------------
71
+
72
+ We provide a SFT Trainer using PyTorch FSDP in
73
+ `fsdp_sft_trainer.py <https://github.com/volcengine/verl/blob/main/verl/trainer/fsdp_sft_trainer.py>`_.
74
+ Users can customize their own SFT
75
+ script using our FSDP SFT Trainer.
76
+
77
+ We also provide various training scripts for SFT on GSM8K dataset in `gsm8k sft directory <https://github.com/volcengine/verl/blob/main/examples/sft/gsm8k/>`_.
78
+
79
+ .. code:: shell
80
+
81
+ set -x
82
+
83
+ torchrun -m verl.trainer.fsdp_sft_trainer \
84
+ data.train_files=$HOME/data/gsm8k/train.parquet \
85
+ data.val_files=$HOME/data/gsm8k/test.parquet \
86
+ data.prompt_key=question \
87
+ data.response_key=answer \
88
+ data.micro_batch_size_per_gpu=8 \
89
+ model.partial_pretrain=deepseek-ai/deepseek-coder-6.7b-instruct \
90
+ trainer.default_hdfs_dir=hdfs://user/verl/experiments/gsm8k/deepseek-coder-6.7b-instruct/ \
91
+ trainer.project_name=gsm8k-sft \
92
+ trainer.experiment_name=gsm8k-sft-deepseek-coder-6.7b-instruct \
93
+ trainer.total_epochs=4 \
94
+ trainer.logger=['console','wandb']
95
+
96
+ Step 4: Perform PPO training with your model on GSM8K Dataset
97
+ -------------------------------------------------------------
98
+
99
+ - Prepare your own run.sh script. Here's an example for GSM8k dataset
100
+ and deepseek-llm-7b-chat model.
101
+ - Users could replace the ``data.train_files`` ,\ ``data.val_files``,
102
+ ``actor_rollout_ref.model.path`` and ``critic.model.path`` based on
103
+ their environment.
104
+ - See :doc:`config` for detailed explanation of each config field.
105
+
106
+ **Reward Model/Function**
107
+
108
+ We use a rule-based reward model. We force the model to produce a final
109
+ answer following 4 “#” as shown in the solution. We extract the final
110
+ answer from both the solution and model's output using regular
111
+ expression matching. We compare them and assign a reward of 1 to correct
112
+ answer, 0.1 to incorrect answer and 0 to no answer.
113
+
114
+ **Training Script**
115
+
116
+ The training script example for FSDP and Megatron-LM backend are stored in examples/ppo_trainer directory.
117
+
118
+ .. code:: bash
119
+
120
+ cd ../ppo_trainer
121
+ bash run_deepseek7b_llm.sh
122
+
123
+ The script of run_deepseek7b_llm.sh
124
+
125
+ .. code:: bash
126
+
127
+ set -x
128
+
129
+ python3 -m verl.trainer.main_ppo \
130
+ data.train_files=$HOME/data/gsm8k/train.parquet \
131
+ data.val_files=$HOME/data/gsm8k/test.parquet \
132
+ data.train_batch_size=1024 \
133
+ data.max_prompt_length=512 \
134
+ data.max_response_length=512 \
135
+ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
136
+ actor_rollout_ref.actor.optim.lr=1e-6 \
137
+ actor_rollout_ref.model.use_remove_padding=True \
138
+ actor_rollout_ref.actor.ppo_mini_batch_size=256 \
139
+ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
140
+ actor_rollout_ref.actor.fsdp_config.param_offload=False \
141
+ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
142
+ actor_rollout_ref.model.enable_gradient_checkpointing=True \
143
+ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \
144
+ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
145
+ actor_rollout_ref.rollout.name=vllm \
146
+ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
147
+ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \
148
+ actor_rollout_ref.ref.fsdp_config.param_offload=True \
149
+ critic.optim.lr=1e-5 \
150
+ critic.model.use_remove_padding=True \
151
+ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
152
+ critic.model.enable_gradient_checkpointing=True \
153
+ critic.ppo_micro_batch_size_per_gpu=32 \
154
+ critic.model.fsdp_config.param_offload=False \
155
+ critic.model.fsdp_config.optimizer_offload=False \
156
+ algorithm.kl_ctrl.kl_coef=0.001 \
157
+ trainer.critic_warmup=0 \
158
+ trainer.logger=['console','wandb'] \
159
+ trainer.project_name='verl_example_gsm8k' \
160
+ trainer.experiment_name='deepseek_llm_7b_function_rm' \
161
+ trainer.n_gpus_per_node=8 \
162
+ trainer.nnodes=1 \
163
+ trainer.save_freq=-1 \
164
+ trainer.test_freq=1 \
165
+ trainer.total_epochs=15 $@
deep_search/DeepResearcher/docs/examples/ppo_code_architecture.rst ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ PPO Example Architecture
2
+ ========================
3
+
4
+ Let's start with the Proximal Policy Optimization algorithm, which is
5
+ the most widely used algorithm in LLM post-training.
6
+
7
+ The main entry point of the PPO algorithm example is:
8
+ `main_ppo.py <https://github.com/volcengine/verl/blob/main/verl/trainer/main_ppo.py>`_.
9
+ In this tutorial, we will go through the code architecture in `main_ppo.py <https://github.com/volcengine/verl/blob/main/verl/trainer/main_ppo.py>`_.
10
+
11
+ Define the data
12
+ ---------------
13
+
14
+ Users need to preprocess and store the dataset in parquet files.
15
+ And we implement `RLHFDataset` to load and tokenize the parquet files.
16
+
17
+ For ``RLHFDataset`` (Default), at least 1 field is required:
18
+
19
+ - ``prompt``: Contains the string prompt
20
+
21
+ We already provide some examples of processing the datasets to parquet
22
+ files in `data_preprocess directory <https://github.com/volcengine/verl/blob/main/examples/data_preprocess>`_. Currently, we support
23
+ preprocessing of GSM8k, MATH, HellaSwag, Full_hh_rlhf datasets. See :doc:`../preparation/prepare_data` for
24
+ more information.
25
+
26
+ Define the reward functions for different datasets
27
+ --------------------------------------------------
28
+
29
+ In this main entry point, the users only need to define their own reward
30
+ function based on the datasets (or applications) utilized in PPO
31
+ training.
32
+
33
+ For example, we already provide reward functions for `GSM8k <https://github.com/volcengine/verl/blob/main/verl/utils/reward_score/gsm8k.py>`_
34
+ and `MATH <https://github.com/volcengine/verl/blob/main/verl/utils/reward_score/math.py>`_
35
+ datasets in the ``_select_rm_score_fn``. In the ``RewardManager``, we
36
+ will compute the reward score based on the data_source to select
37
+ corresponding reward functions. For some RLHF datasets (e.g.,
38
+ full_hh_rlhf), the reward model is utilized to assess the responses
39
+ without any reward functions. In this case, the ``RewardManager`` will
40
+ return the ``rm_score`` computed by the reward model directly.
41
+
42
+ See `reward functions <https://github.com/volcengine/verl/blob/main/verl/utils/reward_score>`_ for detailed implementation.
43
+
44
+ Define worker classes
45
+ ---------------------
46
+
47
+ .. code:: python
48
+
49
+ if config.actor_rollout_ref.actor.strategy == 'fsdp': # for FSDP backend
50
+ assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
51
+ from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker
52
+ from verl.single_controller.ray import RayWorkerGroup
53
+ ray_worker_group_cls = RayWorkerGroup
54
+
55
+ elif config.actor_rollout_ref.actor.strategy == 'megatron': # for Megatron backend
56
+ assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
57
+ from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker
58
+ from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup
59
+ ray_worker_group_cls = NVMegatronRayWorkerGroup # Ray worker class for Megatron-LM
60
+
61
+ else:
62
+ raise NotImplementedError
63
+
64
+ from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role
65
+
66
+ role_worker_mapping = {
67
+ Role.ActorRollout: ActorRolloutRefWorker,
68
+ Role.Critic: CriticWorker,
69
+ Role.RefPolicy: ActorRolloutRefWorker
70
+ }
71
+
72
+ global_pool_id = 'global_pool'
73
+ resource_pool_spec = {
74
+ global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
75
+ }
76
+ mapping = {
77
+ Role.ActorRollout: global_pool_id,
78
+ Role.Critic: global_pool_id,
79
+ Role.RefPolicy: global_pool_id,
80
+ }
81
+
82
+ Step 1: Construct the mapping between roles and workers
83
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
84
+
85
+ A role represents a group of workers in the same process. We have
86
+ pre-defined several roles in `ray_trainer.py <https://github.com/volcengine/verl/blob/main/verl/trainer/ppo/ray_trainer.py#L38>`_.
87
+
88
+ .. code:: python
89
+
90
+ class Role(Enum):
91
+ """
92
+ To create more roles dynamically, you can subclass Role and add new members
93
+ """
94
+ Actor = 0 # This worker only has Actor
95
+ Rollout = 1 # This worker only has Rollout
96
+ ActorRollout = 2 # This worker has both actor and rollout, it's a HybridEngine
97
+ Critic = 3 # This worker only has critic
98
+ RefPolicy = 4 # This worker only has reference policy
99
+ RewardModel = 5 # This worker only has reward model
100
+ ActorRolloutRef = 6 # This worker contains actor, rollout and reference policy simultaneously
101
+
102
+ Step 2: Define the worker class corresponding to this role
103
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
104
+
105
+ - We have pre-implemented the ``ActorRolloutRefWorker``. Through
106
+ different configs, it can be a standalone actor, a standalone rollout,
107
+ an ActorRollout HybridEngine, or an ActorRolloutRef HybridEngine
108
+ - We also pre-implemented workers for ``Actor``, ``Rollout``,
109
+ ``Critic``, ``Reward Model`` and ``Reference model`` on two different
110
+ backend: PyTorch FSDP
111
+ and Megatron-LM.
112
+ See `FSDP Workers <https://github.com/volcengine/verl/blob/main/verl/workers/fsdp_workers.py>`_
113
+ and `Megatron-LM Workers <https://github.com/volcengine/verl/blob/main/verl/workers/megatron_workers.py>`_
114
+ for more information.
115
+
116
+ Step 3: Define resource pool id and resource pool spec
117
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
118
+
119
+ - Resource pool is a division of global GPU resources,
120
+ ``resource_pool_spec`` is a dict, mapping from id to # of GPUs
121
+
122
+ - In the above example, we defined a global resource pool:
123
+ global_pool_id, and then put all roles on this one resource pool
124
+ with all the GPUs in this post-training task. This refers to
125
+ *co-locate* placement where all the models share the same set of
126
+ GPUs.
127
+
128
+ - See resource pool and placement for advanced usage.
129
+
130
+ Defining reward model/function
131
+ ------------------------------
132
+
133
+ .. code:: python
134
+
135
+ # we should adopt a multi-source reward function here
136
+ # - for rule-based rm, we directly call a reward score
137
+ # - for model-based rm, we call a model
138
+ # - for code related prompt, we send to a sandbox if there are test cases
139
+ # - finally, we combine all the rewards together
140
+ # - The reward type depends on the tag of the data
141
+ if config.reward_model.enable:
142
+ from verl.workers.fsdp_workers import RewardModelWorker
143
+ role_worker_mapping[Role.RewardModel] = RewardModelWorker
144
+ mapping[Role.RewardModel] = global_pool_id
145
+
146
+ reward_fn = RewardManager(tokenizer=tokenizer, num_examine=0)
147
+
148
+ # Note that we always use function-based RM for validation
149
+ val_reward_fn = RewardManager(tokenizer=tokenizer, num_examine=1)
150
+
151
+ resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
152
+
153
+ Since not all tasks use model-based RM, users need to define here
154
+ whether it's a model-based RM or a function-based RM
155
+
156
+ - If it's a model-based RM, directly add the ``RewardModel`` role in the
157
+ resource mapping and add it to the resource pool mapping.
158
+
159
+ - Note that the pre-defined ``RewardModelWorker`` only supports models
160
+ with the structure of huggingface
161
+ ``AutoModelForSequenceClassification``. If it's not this model, you
162
+ need to define your own RewardModelWorker in `FSDP Workers <https://github.com/volcengine/verl/blob/main/verl/workers/fsdp_workers.py>`_
163
+ and `Megatron-LM Workers <https://github.com/volcengine/verl/blob/main/verl/workers/megatron_workers.py>`_.
164
+
165
+ - If it's a function-based RM, the users are required to classify the
166
+   reward function for each dataset.
167
+
168
+ .. code:: python
169
+
170
+ def _select_rm_score_fn(data_source):
171
+ if data_source == 'openai/gsm8k':
172
+ return gsm8k.compute_score
173
+ elif data_source == 'lighteval/MATH':
174
+ return math.compute_score
175
+ else:
176
+ raise NotImplementedError
177
+
178
+ See reward functions implemented in `directory <https://github.com/volcengine/verl/blob/main/verl/utils/reward_score/>`_
179
+ for more information.
180
+
181
+ Define, init and run the PPO Trainer
182
+ ------------------------------------
183
+
184
+ .. code:: python
185
+
186
+ trainer = RayPPOTrainer(config=config,
187
+ tokenizer=tokenizer,
188
+ role_worker_mapping=role_worker_mapping,
189
+ resource_pool_manager=resource_pool_manager,
190
+ ray_worker_group_cls=ray_worker_group_cls,
191
+ reward_fn=reward_fn,
192
+ val_reward_fn=val_reward_fn)
193
+ trainer.init_workers()
194
+ trainer.fit()
195
+
196
+ - We first initialize the ``RayPPOTrainer`` with user config, tokenizer
197
+ and all the above worker mapping, resource pool, worker group and
198
+ reward functions
199
+ - We then call the ``trainer.init_workers()`` to initialize the models
200
+ on the allocated GPUs (in the resource pool)
201
+ - The actual PPO training will be executed in ``trainer.fit()``
202
+
203
+ verl can be easily extended to other RL algorithms by reusing the Ray
204
+ model workers, resource pool and reward functions. See :doc:`extension<../advance/dpo_extension>` for
205
+ more information.
206
+
207
+ Details of the ``RayPPOTrainer`` is discussed in :doc:`Ray Trainer<../workers/ray_trainer>`.
deep_search/DeepResearcher/docs/experiment/ppo.rst ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _algo-baseline-page:
2
+
3
+ Algorithm Baselines
4
+ ===================
5
+
6
+ GSM8k
7
+ ------------------
8
+
9
+ Assuming GSM8k dataset is preprocess via ``python3 examples/data_preprocess/gsm8k.py``
10
+
11
+ Refer to the table below to reproduce PPO training from different pre-trained models.
12
+
13
+ .. _Huggingface: https://huggingface.co/google/gemma-2-2b-it#benchmark-results
14
+ .. _SFT Command and Logs: https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/gemma-2-2b-it-sft-0.411.log
15
+ .. _SFT+PPO Command and Logs: https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/gemma-2-2b-it-ppo-bsz512_4-prompt1024-resp-512-0.640.log
16
+ .. _wandb: https://api.wandb.ai/links/verl-team/h7ux8602
17
+ .. _Qwen Blog: https://qwenlm.github.io/blog/qwen2.5-llm/
18
+ .. _PPO Command and Logs: https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log
19
+ .. _Megatron PPO Command and Logs: https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/deepseek-llm-7b-chat-megatron-bsz256_4-prompt512-resp512-0.695.log
20
+ .. _Qwen7b GRPO Script: https://github.com/volcengine/verl/blob/a65c9157bc0b85b64cd753de19f94e80a11bd871/examples/grpo_trainer/run_qwen2-7b_seq_balance.sh
21
+ .. _Megatron wandb: https://wandb.ai/verl-team/verl_megatron_gsm8k_examples/runs/10fetyr3
22
+ .. _Qwen7b ReMax Script: https://github.com/eric-haibin-lin/verl/blob/main/examples/remax_trainer/run_qwen2.5-3b_seq_balance.sh
23
+ .. _Qwen7b ReMax Wandb: https://wandb.ai/liziniu1997/verl_remax_example_gsm8k/runs/vxl10pln
24
+
25
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
26
+ | Model | Method | Test score | Details |
27
+ +==================================+========================+============+===============================================================================================+
28
+ | google/gemma-2-2b-it | pretrained checkpoint | 23.9 | `Huggingface`_ |
29
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
30
+ | google/gemma-2-2b-it | SFT | 52.06 | `SFT Command and Logs`_ |
31
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
32
+ | google/gemma-2-2b-it | SFT + PPO | 64.02 | `SFT+PPO Command and Logs`_, `wandb`_ |
33
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
34
+ | Qwen/Qwen2.5-0.5B-Instruct | pretrained checkpoint | 36.4 | `Qwen Blog`_ |
35
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
36
+ | Qwen/Qwen2.5-0.5B-Instruct | PPO | 56.7 | `PPO Command and Logs`_ |
37
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
38
+ | deepseek-ai/deepseek-llm-7b-chat | PPO | 69.5 [1]_ | `Megatron PPO Command and Logs`_, `Megatron wandb`_ |
39
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
40
+ | Qwen/Qwen2-7B-Instruct | GRPO | 89 | `Qwen7b GRPO Script`_ |
41
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
42
+ | Qwen/Qwen2.5-7B-Instruct | ReMax | 97 | `Qwen7b ReMax Script`_, `Qwen7b ReMax Wandb`_ |
43
+ +----------------------------------+------------------------+------------+-----------------------------------------------------------------------------------------------+
44
+
45
+ .. [1] During the evaluation, we have only extracted answers following the format "####". A more flexible answer extraction, longer response length and better prompt engineering may lead to a higher score.
deep_search/DeepResearcher/docs/faq/faq.rst ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Frequently Asked Questions
2
+ ====================================
3
+
4
+ Ray related
5
+ ------------
6
+
7
+ How to add breakpoint for debugging with distributed Ray?
8
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9
+
10
+ Please checkout the official debugging guide from Ray: https://docs.ray.io/en/latest/ray-observability/ray-distributed-debugger.html
11
+
12
+
13
+ Distributed training
14
+ ------------------------
15
+
16
+ How to run multi-node post-training with Ray?
17
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
18
+
19
+ You can start a ray cluster and submit a ray job, following the official guide from Ray: https://docs.ray.io/en/latest/ray-core/starting-ray.html
20
+
21
+ Then in the configuration, set the ``trainer.nnodes`` config to the number of machines for your job.
22
+
23
+ How to use verl on a Slurm-managed cluster?
24
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
25
+
26
+ Ray provides users with `this <https://docs.ray.io/en/latest/cluster/vms/user-guides/community/slurm.html>`_ official
27
+ tutorial to start a Ray cluster on top of Slurm. We have verified the :doc:`GSM8K example<../examples/gsm8k_example>`
28
+ on a Slurm cluster under a multi-node setting with the following steps.
29
+
30
+ 1. [Optional] If your cluster support `Apptainer or Singularity <https://apptainer.org/docs/user/main/>`_ and you wish
31
+ to use it, convert verl's Docker image to an Apptainer image. Alternatively, set up the environment with the package
32
+ manager available on your cluster or use other container runtimes (e.g. through `Slurm's OCI support <https://slurm.schedmd.com/containers.html>`_) available to you.
33
+
34
+ .. code:: bash
35
+
36
+ apptainer pull /your/dest/dir/vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3.sif docker://verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
37
+
38
+ 2. Follow :doc:`GSM8K example<../examples/gsm8k_example>` to prepare the dataset and model checkpoints.
39
+
40
+ 3. Modify `examples/slurm/ray_on_slurm.slurm <https://github.com/volcengine/verl/blob/main/verl/examples/slurm/ray_on_slurm.slurm>`_ with your cluster's own information.
41
+
42
+ 4. Submit the job script to the Slurm cluster with `sbatch`.
43
+
44
+ Please note that Slurm cluster setup may vary. If you encounter any issues, please refer to Ray's
45
+ `Slurm user guide <https://docs.ray.io/en/latest/cluster/vms/user-guides/community/slurm.html>`_ for common caveats.
46
+
47
+ Illegal memory access
48
+ ---------------------------------
49
+
50
+ If you encounter the error message like ``CUDA error: an illegal memory access was encountered`` during rollout, most likely it is due to a known issue from vllm.
51
+ Please set the following environment variable. The env var must be set before the ``ray start`` command if any.
52
+
53
+ .. code:: bash
54
+
55
+ export VLLM_ATTENTION_BACKEND=XFORMERS
56
+
57
+ If in doubt, print this env var in each rank to make sure it is properly set.
58
+
59
+ Checkpoints
60
+ ------------------------
61
+
62
+ If you want to convert the model checkpoint into huggingface safetensor format, please refer to ``scripts/model_merger.py``.
deep_search/DeepResearcher/docs/hybrid_flow.rst ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ =========================================================
2
+ HybridFlow Programming Guide
3
+ =========================================================
4
+
5
+ .. _vermouth: https://github.com/vermouth1992
6
+
7
+ Author: `Chi Zhang <https://github.com/vermouth1992>`_
8
+
9
+ verl is an open source implementation of the paper `HybridFlow <https://arxiv.org/abs/2409.19256v2>`_ [1]_. In this section, we will introduce the basic concepts of HybridFlow, the motivation and how to program with verl APIs.
10
+
11
+ Motivation and Design
12
+ ------------------------
13
+ We use dataflow to represent RL systems. [4]_.
14
+
15
+ DataFlow
16
+ ~~~~~~~~~~~~~~~~~~~~
17
+
18
+ Dataflow is an abstraction of computations. Neural Network training is a typical dataflow. It can be represented by computational graph.
19
+
20
+ .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/dataflow.jpeg?raw=true
21
+ :alt: The dataflow graph from CS231n 2024 lecture 4
22
+
23
+ This figure [2]_ represents the computation graph of a polynomial function followed by a sigmoid function. In the data flow of neural network computation, each node represents an operator, and each edge represents the direction of forward/backward propagation. The computation graph determines the architecture of the neural network.
24
+
25
+ RL as a dataflow problem
26
+ ++++++++++++++++++++++++++++++++++++++++++++++
27
+
28
+ Reinforcement learning (RL) training can also be represented as a dataflow. Below is the dataflow graph that represents the PPO algorithm used in RLHF [3]_:
29
+
30
+ .. image:: https://picx.zhimg.com/70/v2-cb8ab5ee946a105aab6a563e92682ffa_1440w.avis?source=172ae18b&biz_tag=Post
31
+ :alt: PPO dataflow graph, credit to Zhihu 低级炼丹师
32
+
33
+ However, the dataflow of RL has fundamental differences compared with dataflow of neural network training as follows:
34
+
35
+ +--------------------------+--------------------------------------------------+---------------------+
36
+ | Workload | Node | Edge |
37
+ +--------------------------+--------------------------------------------------+---------------------+
38
+ | Neural Network Training | Operator (+/-/matmul/softmax) | Tensor movement |
39
+ +--------------------------+--------------------------------------------------+---------------------+
40
+ | Reinforcement Learning | High-level operators (rollout/model forward) | Data Movement |
41
+ +--------------------------+--------------------------------------------------+---------------------+
42
+
43
+ In the case of tabular reinforcement learning, each operator is a simple scalar math operation (e.g., bellman update). In deep reinforcement learning(DRL), each operator is a high-level neural network computation such as model inference/update. This makes RL a two-level dataflow problem:
44
+
45
+ - Control flow: defines how the high-level operators are executed (e.g., In PPO, we first perform rollout. Then, we perform advantage computation. Finally, we perform training). It expresses the **core logics of RL algorithms**.
46
+ - Computation flow: defines the dataflow of **neural network computation** (e.g., model forward/backward/optimizer).
47
+
48
+
49
+ Design Choices
50
+ ~~~~~~~~~~~~~~~~~~~~
51
+ The model size used in DRL before the LLM era is typically small. Thus, the high-level neural network computation can be done in a single process. This enables embedding the computation flow inside the control flow as a single process.
52
+
53
+ However, in the LLM era, the computation flow (e.g., training neural network) becomes a multi-process program. This naturally leads to two design choices:
54
+
55
+ 1. Convert the control flow into a multi-process program as well. Then colocate with computation flow (unified multi-controller)
56
+
57
+ - Advantages:
58
+
59
+ - Achieves the **optimal performance** under fixed computation flow and control flow as the communication overhead in both training and data transfer is minimized.
60
+
61
+ - Disadvantages:
62
+
63
+ - The computation and/or control flow is **hard to reuse** from software perspective as computation code is coupled with specific controller code. For example, the training loop of PPO is generic. Say we have an PPO training flow implemented with a specific computation flow such as FSDP. Neither the control flow or computation flow can be reused if we want to switch the computation flow from FSDP to Megatron, due to the coupling of control and computation flows.
64
+ - Requires more efforts from the user under flexible and dynamic control flows, due to the multi-process nature of the program.
65
+
66
+ 2. Separate the flows: single process for the control flow and multi-process for computation flow
67
+
68
+ - Advantages:
69
+
70
+ - The computation flow defined elsewhere can be **easily reused** after the decoupling.
71
+ - The controller runs on a single process. Implementing a new RL algorithm with a **different control flow is simple and easy**.
72
+
73
+ - Disadvantages:
74
+
75
+   - Additional **data communication overhead** each time the controller process and computation processes interact. The data has to be sent back and forth.
76
+
77
+ In verl, the latter strategy with separate control flow and computation flow is adopted. verl is designed to decouple the control flow of RL algorithms, and the implementation of computation engines.
78
+
79
+ Overall Execution Diagram
80
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
81
+
82
+ Below is a simplified diagram denoting the execution of a reinforcement learning job. In the diagram, the controller runs on a single process, while the generator/actor workers, critic workers run on multiple processes, placed with specific resource groups. For rollout, the controller passes the data to the generator to perform sample generation. When the rollout is done, the data is passed back to the controller for the next step of the algorithm. Similar execution is done for other workers. With the hybrid controller design, the data flow and computation is decoupled to provide both efficiency in computation and flexibility in defining algorithm training loops.
83
+
84
+ .. figure:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/driver_worker.png?raw=true
85
+ :alt: The execution diagram
86
+
87
+ Codebase walkthrough (PPO)
88
+ ------------------------------------------------
89
+
90
+ Entry function
91
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
92
+ Code: https://github.com/volcengine/verl/blob/main/verl/trainer/main_ppo.py
93
+
94
+ In this file, we define a remote function `main_task` that serves as the controller (driver) process as shown in the above figure. We also define a ``RewardManager``, where users can customize their reward function based on the data source in the dataset. Note that `RewardManager` should return the final token-level reward that is optimized by RL algorithms. Note that users can combine model-based rewards and rule-based rewards.
95
+ The ``main_task`` constructs a RayPPOTrainer instance and launches the fit. Note that ``main_task`` **runs as a single process**.
96
+
97
+ We highly recommend that the ``main_task`` is NOT scheduled on the head of the ray cluster because ``main_task`` will consume a lot of memory but the head usually contains very few resources.
98
+
99
+ Ray trainer
100
+ ~~~~~~~~~~~~~~~~~~~~
101
+ Code: https://github.com/volcengine/verl/blob/main/verl/trainer/ppo/ray_trainer.py
102
+
103
+ The RayPPOTrainer manages
104
+
105
+ - Worker and WorkerGroup construction
106
+ - Runs the main loop of PPO algorithm
107
+
108
+ Note that, the fit function of RayPPOTrainer **runs as a single process**.
109
+
110
+ Worker and WorkerGroup construction
111
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
112
+
113
+ Each WorkerGroup manages a list of workers that run remotely. Note that the worker group runs in the process of its constructor.
114
+ Each worker inside the WorkerGroup runs on a GPU. The worker group serves as a proxy for the controller process to interact with a list of workers, in order to perform certain computations. **In order to do so, we have to bind the methods of the worker into the method of the WorkerGroup and define the data dispatch and data collection**. This is done via simple decoration that will be introduced in the Worker definition section.
115
+
116
+ For example, in PPO, we define 3 worker groups:
117
+
118
+ - ActorRolloutRef: manages actor, rollout and reference policy. ActorRolloutRefWorker can be instantiated as a single actor, a single rollout, a single reference policy, a combined actor/rollout or a combined actor/rollout/ref. This design is aimed for the maximum code reuse in various scenarios. The reason for colocating actor and rollout is for fast weight transfer using nccl. The reason for colocating actor and reference is to implement an efficient lora PPO as the reference policy is simply the base model of PPO in lora.
119
+ - Critic: manages the critic model
120
+ - Reward: manages the reward model
121
+
122
+ The worker group will be constructed on the resource pool it designates. The resource pool is a set of GPUs in the ray cluster.
123
+
124
+ Worker definition
125
+ ~~~~~~~~~~~~~~~~~~~~
126
+
127
+ .. _ActorRolloutRefWorker: https://github.com/volcengine/verl/blob/main/verl/workers/fsdp_workers.py
128
+
129
+ We take `ActorRolloutRefWorker`_ for an example.
130
+ The APIs it should expose to the controller process are:
131
+
132
+ - init_model: build the underlying model
133
+ - generate_sequences: given prompts, generate responses
134
+ - compute_log_prob: compute the log-probability of a generated sequence using actor
135
+ - compute_ref_log_prob: compute the log-probability of a generated sequence using reference policy
136
+ - save_checkpoint: save the checkpoint
137
+
138
+ Note that these methods are defined in the worker that can only be invoked via remote calls. For example, if the controller process wants to initialize the model, it has to call
139
+
140
+ .. code-block:: python
141
+
142
+ for worker in actor_rollout_ref_wg:
143
+ worker.init_model.remote()
144
+
145
+ If the controller process wants to generate sequences, it has to call
146
+
147
+ .. code-block:: python
148
+
149
+ data = xxx
150
+ # split the data into dp chunks
151
+ data_dp_lst = data.split(dp_size)
152
+ output_dp_lst = []
153
+ for i, worker in enumerate(actor_rollout_ref_wg):
154
+ output_future = worker.generate_sequences.remote(data_dp_lst[i])
155
+ output_dp_lst.append(output_future)
156
+ output = torch.cat(ray.get(output_dp_lst), dim=0)
157
+
158
+ We observe that the controller process calling worker group methods can in general be divided into 3 parts:
159
+
160
+ - Split the data into data parallel sizes
161
+ - Dispatch the corresponding data into each worker
162
+ - Collect and concatenate the data when the computation finishes
163
+
164
+ In verl, we design a syntax sugar to encapsulate the 3 processes into a single call from the controller process.
165
+
166
+ .. code-block:: python
167
+
168
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
169
+ def generate_sequences(data):
170
+ ...
171
+
172
+ # on the driver
173
+ output = actor_rollout_ref_wg.generate_sequences(data)
174
+
175
+ We decorate the method of the worker with a ``register`` that explicitly defines how the input data should be split and dispatched to each worker, and how the output data should be collected and concatenated by the controller. For example, ``Dispatch.DP_COMPUTE_PROTO`` splits the input data into dp chunks, dispatches each chunk to the corresponding worker, collects the output and concatenates the results. Note that this function requires the input and output to be a DataProto defined here (https://github.com/volcengine/verl/blob/main/verl/protocol.py).
176
+
177
+
178
+ PPO main loop
179
+ ~~~~~~~~~~~~~~~~~~~~
180
+ With the aforementioned APIs, we can implement the main loop of PPO as if it is a single process program
181
+
182
+ .. code-block:: python
183
+
184
+ for prompt in dataloader:
185
+ output = actor_rollout_ref_wg.generate_sequences(prompt)
186
+ old_log_prob = actor_rollout_ref_wg.compute_log_prob(output)
187
+ ref_log_prob = actor_rollout_ref_wg.compute_ref_log_prob(output)
188
+ values = critic_wg.compute_values(output)
189
+ rewards = reward_wg.compute_scores(output)
190
+ # compute_advantages is running directly on the control process
191
+ advantages = compute_advantages(values, rewards)
192
+ output = output.union(old_log_prob)
193
+ output = output.union(ref_log_prob)
194
+ output = output.union(values)
195
+ output = output.union(rewards)
196
+ output = output.union(advantages)
197
+ # update actor
198
+ actor_rollout_ref_wg.update_actor(output)
199
+ critic.update_critic(output)
200
+
201
+ Takeaways
202
+ ~~~~~~~~~~~~~~~~~~~~
203
+ - This programming paradigm enables users to use different computation backend without modification of the control process.
204
+ - This programming paradigm enables flexible placement (by changing the mapping of WorkerGroup and ResourcePool) without modification of the control process.
205
+
206
+ Repository organization
207
+ ------------------------------------------------
208
+
209
+ Important code files in the repository are organized as below:
210
+
211
+ .. code-block:: bash
212
+
213
+ verl # the verl package
214
+ trainer
215
+ main_ppo.py # the entrypoint for RL training
216
+ ppo
217
+ ray_trainer.py # the training loop for RL algorithms such as PPO
218
+ fsdp_sft_trainer.py # the SFT trainer with FSDP backend
219
+ config
220
+ generation.yaml # configuration template for rollout
221
+ ppo_trainer.yaml # configuration template for the RL trainer
222
+ workers
223
+ protocol.py # the interface of DataProto
224
+ fsdp_workers.py # the FSDP worker interfaces: ActorRolloutRefWorker, CriticWorker, RewardModelWorker
225
+ megatron_workers.py # the Megatron worker interfaces: ActorRolloutRefWorker, CriticWorker, RewardModelWorker
226
+ actor
227
+ dp_actor.py # data parallel actor with FSDP backend
228
+ megatron_actor.py # nD parallel actor with Megatron backend
229
+ critic
230
+ dp_critic.py # data parallel critic with FSDP backend
231
+ megatron_critic.py # nD parallel critic with FSDP backend
232
+ reward_model
233
+ megatron
234
+ reward_model.py # reward model with Megatron backend
235
+ rollout
236
+ vllm
237
+ vllm_rollout.py # rollout with vllm backend
238
+ hf_rollout.py # rollout with huggingface TGI backend
239
+ sharding_manager
240
+ fsdp_ulysses.py # data and model resharding when using FSDP + ulysses
241
+ fsdp_vllm.py # data and model resharding when using FSDP + ulysses + vllm
242
+ megatron_vllm.py # data and model resharding when using Megatron + vllm
243
+ utils
244
+ dataset # datasets for SFT/RM/RL
245
+ reward_score # function based reward
246
+ gsm8k.py # reward function for gsm8k dataset
247
+ math.py # reward function for math dataset
248
+ seqlen_balancing.py # the sequence balance optimization
249
+ models
250
+ llama # Megatron implementation for llama, deepseek, mistral, etc
251
+ transformers # ulysses integration with transformer models such as llama, qwen, etc
252
+ weight_loader_registery.py # registry of weight loaders for loading hf ckpt into Megatron
253
+ third_party
254
+ vllm # adaptor for vllm's usage in RL
255
+ vllm_v_0_6_3 # vllm v0.6.3 adaptor
256
+ llm.py # entrypoints for generate, sync_model_weight, offload_model_weights
257
+ parallel_state.py # vllm related device mesh and process groups
258
+ dtensor_weight_loaders.py # weight loader for huggingface models with FSDP
259
+ megatron_weight_loaders.py # weight loader for Megatron models
260
+ vllm_spmd # vllm >= v0.7 adaptor (coming soon)
261
+ examples # example scripts
262
+ tests # integration and unit tests
263
+ .github # the configuration of continuous integration tests
264
+
265
+
266
+ .. [1] HybridFlow: A Flexible and Efficient RLHF Framework: https://arxiv.org/abs/2409.19256v2
267
+ .. [2] Data flow graph credit to CS231n 2024 lecture 4: https://cs231n.stanford.edu/slides/2024/lecture_4.pdf
268
+ .. [3] PPO dataflow graph credit to 低级炼丹师 from Zhihu​: https://zhuanlan.zhihu.com/p/635757674
269
+ .. [4] RLFlow
deep_search/DeepResearcher/docs/index.rst ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Welcome to verl's documentation!
2
+ ================================================
3
+
4
+ .. _hf_arxiv: https://arxiv.org/pdf/2409.19256
5
+
6
+ verl is a flexible, efficient and production-ready RL training framework designed for large language models (LLMs) post-training. It is an open source implementation of the `HybridFlow <hf_arxiv>`_ paper.
7
+
8
+ verl is flexible and easy to use with:
9
+
10
+ - **Easy extension of diverse RL algorithms**: The Hybrid programming model combines the strengths of single-controller and multi-controller paradigms to enable flexible representation and efficient execution of complex Post-Training dataflows. Allowing users to build RL dataflows in a few lines of code.
11
+
12
+ - **Seamless integration of existing LLM infra with modular APIs**: Decouples computation and data dependencies, enabling seamless integration with existing LLM frameworks, such as PyTorch FSDP, Megatron-LM and vLLM. Moreover, users can easily extend to other LLM training and inference frameworks.
13
+
14
+ - **Flexible device mapping and parallelism**: Supports various placement of models onto different sets of GPUs for efficient resource utilization and scalability across different cluster sizes.
15
+
16
+ - Readily integration with popular HuggingFace models
17
+
18
+
19
+ verl is fast with:
20
+
21
+ - **State-of-the-art throughput**: By seamlessly integrating existing SOTA LLM training and inference frameworks, verl achieves high generation and training throughput.
22
+
23
+ - **Efficient actor model resharding with 3D-HybridEngine**: Eliminates memory redundancy and significantly reduces communication overhead during transitions between training and generation phases.
24
+
25
+ --------------------------------------------
26
+
27
+ .. _Contents:
28
+
29
+ .. toctree::
30
+ :maxdepth: 5
31
+ :caption: Quickstart
32
+
33
+ start/install
34
+ start/quickstart
35
+
36
+ .. toctree::
37
+ :maxdepth: 4
38
+ :caption: Programming guide
39
+
40
+ hybrid_flow
41
+
42
+ .. toctree::
43
+ :maxdepth: 5
44
+ :caption: Data Preparation
45
+
46
+ preparation/prepare_data
47
+ preparation/reward_function
48
+
49
+ .. toctree::
50
+ :maxdepth: 5
51
+ :caption: Configurations
52
+
53
+ examples/config
54
+
55
+ .. toctree::
56
+ :maxdepth: 2
57
+ :caption: PPO Example
58
+
59
+ examples/ppo_code_architecture
60
+ examples/gsm8k_example
61
+
62
+ .. toctree::
63
+ :maxdepth: 1
64
+ :caption: PPO Trainer and Workers
65
+
66
+ workers/ray_trainer
67
+ workers/fsdp_workers
68
+ workers/megatron_workers
69
+
70
+ .. toctree::
71
+ :maxdepth: 1
72
+ :caption: Performance Tuning Guide
73
+
74
+ perf/perf_tuning
75
+ README_vllm0.7.md
76
+
77
+ .. toctree::
78
+ :maxdepth: 1
79
+ :caption: Experimental Results
80
+
81
+ experiment/ppo
82
+
83
+ .. toctree::
84
+ :maxdepth: 1
85
+ :caption: Advance Usage and Extension
86
+
87
+ advance/placement
88
+ advance/dpo_extension
89
+ advance/fsdp_extension
90
+ advance/megatron_extension
91
+
92
+ .. toctree::
93
+ :maxdepth: 1
94
+ :caption: API References
95
+
96
+ data.rst
97
+
98
+
99
+ .. toctree::
100
+ :maxdepth: 1
101
+ :caption: FAQ
102
+
103
+ faq/faq
104
+
105
+ Contribution
106
+ -------------
107
+
108
+ verl is free software; you can redistribute it and/or modify it under the terms
109
+ of the Apache License 2.0. We welcome contributions.
110
+ Join us on `GitHub <https://github.com/volcengine/verl>`_, `Slack <https://join.slack.com/t/verlgroup/shared_invite/zt-2w5p9o4c3-yy0x2Q56s_VlGLsJ93A6vA>`_ and `Wechat <https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/WeChat.JPG>`_ for discussions.
111
+
112
+ Code formatting
113
+ ^^^^^^^^^^^^^^^^^^^^^^^^
114
+ We use yapf (Google style) to enforce strict code formatting when reviewing MRs. Run yapf at the top level of verl repo:
115
+
116
+ .. code-block:: bash
117
+
118
+ pip3 install yapf
119
+ yapf -ir -vv --style ./.style.yapf verl examples tests
deep_search/DeepResearcher/docs/perf/perf_tuning.rst ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Performance Tuning Guide
2
+ ==============================
3
+
4
+ Author: `Guangming Sheng <https://github.com/PeterSH6>`_
5
+
6
+ In this section, we will discuss how to tune the performance of all the stages in verl, including:
7
+
8
+ 1. Rollout generation throughput.
9
+
10
+ 2. Enable `use_remove_padding=True` for sequence packing (i.e., data packing and removing padding).
11
+
12
+ 3. Batch size tuning for forward and backward computation
13
+
14
+ 4. Enable ``use_dynamic_bsz=True`` for higher throughput.
15
+
16
+ 5. Utilize Ulysses Sequence Parallel for Long Context Training
17
+
18
+ 6. LigerKernel for SFT performance optimization
19
+
20
+ Rollout Generation Tuning
21
+ --------------------------
22
+
23
+ verl currently supports two rollout backends: vLLM and TGI (with SGLang support coming soon).
24
+
25
+ Below are key factors for tuning vLLM-based rollout. Before tuning, we recommend setting ``actor_rollout_ref.rollout.disable_log_stats=False`` so that rollout statistics are logged.
26
+
27
+ - Increase ``gpu_memory_utilization``. The vLLM pre-allocates GPU KVCache by using gpu_memory_utilization% of the remaining memory.
28
+ However, if model parameters and optimizer states are not offloaded, using too high a fraction can lead to OOM.
29
+ A value between 0.5 and 0.7 often strikes a good balance between high throughput and avoiding OOM.
30
+
31
+ - Adjust ``max_num_seqs`` or ``max_num_batched_tokens``.
32
+ If the GPU cache utilization is relatively low in the log, increasing ``max_num_seqs`` or ``max_num_batched_tokens``
33
+ can enlarge the effective batch size in the decoding stage, allowing more concurrent requests per batch.
34
+ We recommend setting ``max_num_batched_tokens > 2048`` for higher throughput.
35
+
36
+ - Use a smaller ``tensor_parallel_size``.
37
+ When GPU resources allow, a smaller tensor parallel size spawns more vLLM replicas.
38
+ Data parallelism (DP) can yield higher throughput than tensor parallelism (TP), but also increases KVCache consumption.
39
+ Carefully balance the trade-off between more replicas and higher memory usage.
40
+ Our experiment in Sec. 8.4 of the `HybridFlow paper <https://arxiv.org/abs/2409.19256>`_ evaluates this trade-off.
41
+
42
+ More tuning details such as dealing with Preemption and Chunked-prefill
43
+ can be found in `vLLM official tuning guide <https://docs.vllm.ai/en/latest/performance/optimization.html>`_
44
+
45
+ The performance of vllm can be further increased if upgrading from v0.6.3 to v0.7. See https://github.com/volcengine/verl/blob/main/docs/README_vllm0.7.md for details on how to upgrade.
46
+
47
+ Enable remove padding (sequence packing)
48
+ -----------------------------------------
49
+
50
+ Currently, for llama, mistral, gemma1 and qwen based models, users can enable `use_remove_padding=True` to utilize the
51
+ sequence packing implementation provided by transformers library.
52
+
53
+ For other models, transformers library may also support it but we haven't tested it yet.
54
+ Users can add the desired model config to the `test_transformer.py <https://github.com/volcengine/verl/blob/main/tests/model/test_transformer.py#L24>`_ file.
55
+ And test its functionality by running the following command:
56
+
57
+ .. code-block:: bash
58
+
59
+ pytest -s tests/model/test_transformer.py
60
+
61
+ If the test passes, you can add your desired model into the model `registry.py <https://github.com/volcengine/verl/blob/main/verl/models/registry.py#L24>`_ file.
62
+ Then, you can enjoy the performance boost of sequence packing
63
+ and welcome to PR your tested model to verl!
64
+
65
+
66
+ Batch Size Tuning
67
+ -----------------
68
+
69
+ To achieve higher throughput in experience preparation (i.e., model fwd) and model update (i.e., actor/critic fwd/bwd),
70
+ users may need to tune the ``*micro_batch_size_per_gpu`` for different computation.
71
+
72
+ In verl, the core principle for setting batch sizes is:
73
+
74
+ - **Algorithmic metrics** (train batch size, PPO mini-batch size) are *global* (from a single-controller perspective),
75
+ normalized in each worker. See the `normalization code <https://github.com/volcengine/verl/blob/main/verl/workers/fsdp_workers.py#L120-L122>`_.
76
+
77
+ - **Performance-related parameters** (micro batch size, max token length for dynamic batch size) are *local* parameters that define the per-GPU data allocations.
78
+ See the `normalization code <https://github.com/volcengine/verl/blob/main/verl/workers/fsdp_workers.py#L127>`_.
79
+
80
+ .. note:: In your training script, please use ``*micro_batch_size_per_gpu`` instead of ``*micro_batch_size``.
81
+ This way you don't need to consider the normalization of ``micro_batch_size``; note that ``micro_batch_size`` will be deprecated.
82
+
83
+ Batch Size Tuning tips
84
+ """"""""""""""""""""""
85
+
86
+ Therefore, users may need to tune the ``*micro_batch_size_per_gpu`` to accelerate training. Here're some tips:
87
+
88
+ 1. **Enable gradient checkpointing**:
89
+ Set ``actor_rollout_ref.model.enable_gradient_checkpointing=True`` and ``critic.model.enable_gradient_checkpointing=True``.
90
+ This often allows for larger micro-batch sizes and will be beneficial for large mini-batch training.
91
+
92
+ 2. Increase the ``*micro_batch_size_per_gpu`` as much as possible till equals to normalized ``mini_batch_size``.
93
+
94
+ 3. **Use larger forward-only parameters**:
95
+ Forward only parameter, such as ``actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu``,
96
+ ``actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu``, ``critic.forward_micro_batch_size_per_gpu`` could be larger (e.g., 2x) than training related micro batch sizes,
97
+ such as ``actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu``, ``critic.ppo_micro_batch_size_per_gpu``.
98
+
99
+ 4. **Allow larger micro-batch sizes for Critic and Reward models**:
100
+ micro batch size of Critic and Reward model could be larger than Actor model. This is because the actor model has much larger vocab size in the final layer.
101
+
102
+
103
+ Tuning for Dynamic Batch Size
104
+ -----------------------------
105
+
106
+ Dynamic batch size is a technique that allows the model to process similar number of tokens in a single forward pass (with different actual batch sizes).
107
+ This can significantly improve the training efficiency and reduce the memory usage.
108
+
109
+ To utilize this technique, users can set ``use_dynamic_bsz=True`` in actor, ref, critic and reward models.
110
+ With ``use_dynamic_bsz=True``, users don't need to tune ``*micro_batch_size_per_gpu``.
111
+ Instead, users should tune the following parameters:
112
+
113
+ - ``actor_rollout_ref.actor.ppo_max_token_len_per_gpu``, ``critic.ppo_max_token_len_per_gpu``:
114
+ The maximum number of tokens to be processed in fwd and bwd of ``update_policy`` and ``update_critic``.
115
+
116
+ - ``actor_rollout_ref.ref.log_prob_max_token_len_per_gpu`` and ``actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu``:
117
+ The maximum number of tokens to be processed in the fwd computation of ``compute_log_prob`` and ``compute_ref_log_prob``.
118
+
119
+ - ``critic.forward_micro_batch_size_per_gpu``, ``reward_model.forward_micro_batch_size_per_gpu``:
120
+ The maximum number of tokens to be processed in the fwd computation of ``compute_values``, ``compute_rm_score``.
121
+
122
+ Dynamic Batch Size Tuning tips
123
+ """"""""""""""""""""""""""""""
124
+
125
+ Here're some tips to tune the above parameters:
126
+
127
+ 1. **Increase** ``actor_rollout_ref.actor.ppo_max_token_len_per_gpu``
128
+ Make it at least 2 x (max_prompt_length + max_response_length). We set it to 3x in `run_qwen2-7b_rm_seq_balance.sh <https://github.com/volcengine/verl/blob/main/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance.sh#L25>`_.
129
+ Try to increase it to get higher throughput.
130
+
131
+ 2. **Forward-only parameters can be larger**:
132
+ Similar to the non-dynamic-batch scenario, forward-only token limits can exceed those used in forward/backward operations.
133
+
134
+ 3. **Use larger limits for Critic and Reward models**:
135
+ Critic and Reward parameters can be set at least 2× the Actor’s limits. For instance, we set them to 4× here:
136
+ `run_qwen2-7b_rm_seq_balance.sh <https://github.com/volcengine/verl/blob/main/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance.sh#L40>`_
137
+
138
+ .. :math:`\text{critic.ppo_max_token_len_per_gpu} = 2 \times \text{actor.ppo_max_token_len_per_gpu})`.
139
+
140
+ Ulysses Sequence Parallel for Long Context Training
141
+ ----------------------------------------------------
142
+
143
+ To utilize this technique, users can set ``ulysses_sequence_parallel_size>1`` in actor, ref, critic and reward models.
144
+
145
+ We support using different ``ulysses_sequence_parallel_size`` values for different models.
146
+
147
+ To train on long sequences (>32k), users may need to decrease the ``*micro_batch_size_per_gpu`` and ``*max_token_len_per_gpu`` to avoid OOM.
148
+
149
+ LigerKernel for SFT
150
+ ----------------------
151
+
152
+ LigerKernel is a high-performance kernel for Supervised Fine-Tuning (SFT) that can improve training efficiency. To enable LigerKernel in your SFT training:
153
+
154
+ 1. Install liger-kernel via ``pip3 install liger-kernel``. In your SFT configuration file (e.g., ``verl/trainer/config/sft_trainer.yaml``), set the ``use_liger`` parameter:
155
+
156
+ .. code-block:: yaml
157
+
158
+ model:
159
+ use_liger: True # Enable LigerKernel for SFT
160
+
161
+ 2. The default value is ``False``. Enable it only when you want to use LigerKernel's optimizations.
162
+
163
+ 3. LigerKernel is particularly useful for improving training performance in SFT scenarios.
164
+
deep_search/DeepResearcher/docs/preparation/prepare_data.rst ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Prepare Data for Post-Training
2
+ ========================================
3
+
4
+ Before starting the post-training job, we need to prepare the data for
5
+ the policy training. The data should be stored in the parquet format.
6
+
7
+ We provide several data preprocess scripts for different datasets,
8
+ including GSM8K, MATH, HelloSwag, Full_hh_rlhf. To prepare other datasets, we need
9
+ to follow the following steps: The data preprocess script can be divided
10
+ into two parts:
11
+
12
+ 1. The first part is the common part, which loads the dataset from
13
+ huggingface's ``datasets`` package. Then preprocess the datasets with
14
+ the ``make_map_fn`` and then store in the parquet format.
15
+
16
+ .. code:: python
17
+
18
+ import re
19
+ import os
20
+ import datasets
21
+
22
+ from verl.utils.hdfs_io import copy, makedirs
23
+ import argparse
24
+
25
+ # To extract the solution for each prompts in the dataset
26
+ # def extract_solution(solution_str):
27
+ # ...
28
+
29
+
30
+ if __name__ == '__main__':
31
+ parser = argparse.ArgumentParser()
32
+ parser.add_argument('--local_dir', default='/opt/tiger/gsm8k')
33
+ parser.add_argument('--hdfs_dir', default=None)
34
+
35
+ args = parser.parse_args()
36
+
37
+ num_few_shot = 5
38
+ data_source = 'openai/gsm8k'
39
+
40
+ dataset = datasets.load_dataset(data_source, 'main')
41
+
42
+ train_dataset = dataset['train']
43
+ test_dataset = dataset['test']
44
+
45
+ # Construct a `def make_map_fn(split)` for the corresponding datasets.
46
+ # ...
47
+
48
+ train_dataset = train_dataset.map(function=make_map_fn('train'), with_indices=True)
49
+ test_dataset = test_dataset.map(function=make_map_fn('test'), with_indices=True)
50
+
51
+ local_dir = args.local_dir
52
+ hdfs_dir = args.hdfs_dir
53
+
54
+ train_dataset.to_parquet(os.path.join(local_dir, 'train.parquet'))
55
+ test_dataset.to_parquet(os.path.join(local_dir, 'test.parquet'))
56
+
57
+ makedirs(hdfs_dir)
58
+
59
+ copy(src=local_dir, dst=hdfs_dir)
60
+
61
+ 2. The users are required to implement the ``make_map_fn()`` function
62
+ (as well as the ``extract_solution``) on their own to support
63
+ different datasets or tasks.
64
+
65
+ We already implemented the data preprocess of GSM8k, MATH, Hellaswag and Full_hh_rlhf
66
+ datasets. And we take the GSM8k dataset as an example:
67
+
68
+ **GSM8K**
69
+
70
+ In the ``make_map_fn``, each data field should consist of the following
71
+ 5 fields:
72
+
73
+ 1. ``data_source``: The name of the dataset. To index the corresponding
74
+ reward function in the ``RewardModule``
75
+ 2. ``prompt``: This field should be constructed in the format of
76
+ huggingface chat_template. The tokenizer in ``RLHFDataset`` will
77
+ apply chat template and tokenize the prompt.
78
+ 3. ``ability``: Define the task category.
79
+ 4. ``reward_model``: Currently, we only utilize the ``ground_truth``
80
+ field during evaluation. The ``ground_truth`` is computed by the
81
+ ``extract_solution`` function. **NOTED** that the implementation of
82
+ the corresponding reward function should align with this extracted
83
+ ``ground_truth``.
84
+ 5. ``extra_info``: Record some information of the current prompt. Not
85
+ use for now.
86
+
87
+ .. code:: python
88
+
89
+ def extract_solution(solution_str):
90
+ solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) # extract the solution after ####
91
+ assert solution is not None
92
+ final_solution = solution.group(0)
93
+ final_solution = final_solution.split('#### ')[1].replace(',', '')
94
+ return final_solution
95
+
96
+ instruction_following = "Let's think step by step and output the final answer after \"####\"."
97
+
98
+ # add a row to each data item that represents a unique id
99
+ def make_map_fn(split):
100
+
101
+ def process_fn(example, idx):
102
+ question = example.pop('question')
103
+
104
+ question = question + ' ' + instruction_following
105
+
106
+ answer = example.pop('answer')
107
+ solution = extract_solution(answer)
108
+ data = {
109
+ "data_source": data_source,
110
+ "prompt": [{
111
+ "role": "user",
112
+ "content": question
113
+ }],
114
+ "ability": "math",
115
+ "reward_model": {
116
+ "style": "rule",
117
+ "ground_truth": solution
118
+ },
119
+ "extra_info": {
120
+ 'split': split,
121
+ 'index': idx
122
+ }
123
+ }
124
+ return data
125
+
126
+ return process_fn
deep_search/DeepResearcher/docs/preparation/reward_function.rst ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Implement Reward Function for Dataset
2
+ ======================================
3
+
4
+ For each dataset, we need to implement a reward function or utilize a reward model to compute the rewards for the generated responses.
5
+ We already pre-implemented some reward functions in `reward_score directory <https://github.com/volcengine/verl/blob/main/verl/utils/reward_score>`_.
6
+
7
+ Currently, we support reward functions for GSM8k and MATH datasets. For RLHF datasets (e.g.,
8
+ full_hh_rlhf) and Code Generation (e.g., APPS), we utilize reward model
9
+ and SandBox (to be open-sourced soon) for evaluation, respectively.
10
+
11
+ RewardManager
12
+ -------------
13
+
14
+ In the entrypoint of the PPO Post-Training script `main_ppo.py <https://github.com/volcengine/verl/blob/main/verl/trainer/main_ppo.py#L33>`_,
15
+ we implement a ``RewardManager`` that utilizes pre-implemented reward functions to compute the scores for each response.
16
+
17
+ In the ``RewardManager``, we implemented a ``__call__`` function to
18
+ compute the score for each response.
19
+ All the reward functions are executed by ``compute_score_fn``.
20
+ The input is a ``DataProto``, which includes:
21
+
22
+ - ``input_ids``, ``attention_mask``: ``input_ids`` and ``attention_mask`` after applying
23
+ chat_template, including prompt and response
24
+ - ``responses``: response tokens
25
+ - ``ground_truth``: The ground truth string of the current prompt.
26
+ Stored in ``non_tensor_batch`` in the ``DataProto``, which should be
27
+ preprocessed in the parquet files.
28
+ - ``data_source``: The dataset name of the current prompt. Stored in
29
+ ``non_tensor_batch`` in the ``DataProto``, which should be
30
+ preprocessed in the parquet files.
31
+
32
+ After detokenizing the responses, the response strings and the ground
33
+ truth string will be input to the ``compute_score_fn`` to compute the
34
+ score for each response.
35
+
36
+ Reward Functions
37
+ ----------------
38
+ We already pre-implemented some reward functions in `reward_score directory <https://github.com/volcengine/verl/blob/main/verl/utils/reward_score>`_.
39
+
40
+ - In the `GSM8k example <https://github.com/volcengine/verl/blob/main/verl/utils/reward_score/gsm8k.py>`_, we
41
+ force the response to output the final answer after the "####" delimiter, then
42
+ use string matching to compare with the ground truth. If completely
43
+ correct, score 1 point; if the format is correct, score 0.1 points; if
44
+ the format is incorrect, score 0 points.
45
+ - In the `MATH example <https://github.com/volcengine/verl/blob/main/verl/utils/reward_score/math.py>`_, we follow
46
+ the implementation in `lm-evaluation-harness repository <https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hendrycks_math/utils.py>`_.
deep_search/DeepResearcher/docs/requirements-docs.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # markdown support
2
+ recommonmark
3
+ # markdown table support
4
+ sphinx-markdown-tables
5
+
6
+ # theme default rtd
7
+
8
+ # crate-docs-theme
9
+ sphinx-rtd-theme
10
+
11
+ # pin tokenizers version to avoid env_logger version req
12
+ tokenizers==0.19.1
deep_search/DeepResearcher/docs/start/install.rst ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Installation
2
+ ============
3
+
4
+ Requirements
5
+ ------------
6
+
7
+ - **Python**: Version >= 3.9
8
+ - **CUDA**: Version >= 12.1
9
+
10
+ verl supports various backends. Currently, the following configurations are available:
11
+
12
+ - **FSDP** and **Megatron-LM** (optional) for training.
13
+ - **vLLM** and **TGI** for rollout generation, **SGLang** support coming soon.
14
+
15
+ Training backends
16
+ ------------------
17
+
18
+ We recommend using **FSDP** backend to investigate, research and prototype different models, datasets and RL algorithms. The guide for using FSDP backend can be found in :doc:`FSDP Workers<../workers/fsdp_workers>`.
19
+
20
+ For users who pursue better scalability, we recommend using **Megatron-LM** backend. Currently, we support Megatron-LM v0.4 [1]_. The guide for using Megatron-LM backend can be found in :doc:`Megatron-LM Workers<../workers/megatron_workers>`.
21
+
22
+
23
+ Install from docker image
24
+ -------------------------
25
+
26
+ We provide pre-built Docker images for quick setup.
27
+
28
+ Image and tag: ``verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3``. See files under ``docker/`` for NGC-based image or if you want to build your own.
29
+
30
+ 1. Launch the desired Docker image:
31
+
32
+ .. code:: bash
33
+
34
+ docker run --runtime=nvidia -it --rm --shm-size="10g" --cap-add=SYS_ADMIN <image:tag>
35
+
36
+
37
+ 2. Inside the container, install verl:
38
+
39
+ .. code:: bash
40
+
41
+ # install the nightly version (recommended)
42
+ git clone https://github.com/volcengine/verl && cd verl && pip3 install -e .
43
+ # or install from pypi via `pip3 install verl`
44
+
45
+
46
+ 3. Setup Megatron (optional)
47
+
48
+ If you want to enable training with Megatron, Megatron code must be added to PYTHONPATH:
49
+
50
+ .. code:: bash
51
+
52
+ cd ..
53
+ git clone -b core_v0.4.0 https://github.com/NVIDIA/Megatron-LM.git
54
+ cp verl/patches/megatron_v4.patch Megatron-LM/
55
+ cd Megatron-LM && git apply megatron_v4.patch
56
+ pip3 install -e .
57
+ export PYTHONPATH=$PYTHONPATH:$(pwd)
58
+
59
+
60
+ You can also get the Megatron code after verl's patch via
61
+
62
+ .. code:: bash
63
+
64
+ git clone -b core_v0.4.0_verl https://github.com/eric-haibin-lin/Megatron-LM
65
+ export PYTHONPATH=$PYTHONPATH:$(pwd)/Megatron-LM
66
+
67
+ Install from custom environment
68
+ ---------------------------------
69
+
70
+ To manage environment, we recommend using conda:
71
+
72
+ .. code:: bash
73
+
74
+ conda create -n verl python==3.9
75
+ conda activate verl
76
+
77
+ For installing the latest version of verl, the best way is to clone and
78
+ install it from source. Then you can modify our code to customize your
79
+ own post-training jobs.
80
+
81
+ .. code:: bash
82
+
83
+ # install verl together with some lightweight dependencies in setup.py
84
+ pip3 install torch==2.4.0 --index-url https://download.pytorch.org/whl/cu124
85
+ pip3 install flash-attn --no-build-isolation
86
+ git clone https://github.com/volcengine/verl.git
87
+ cd verl
88
+ pip3 install -e .
89
+
90
+
91
+ Megatron is optional. Its dependencies can be set up as below:
92
+
93
+ .. code:: bash
94
+
95
+ # apex
96
+ pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
97
+ git+https://github.com/NVIDIA/apex
98
+
99
+ # transformer engine
100
+ pip3 install git+https://github.com/NVIDIA/TransformerEngine.git@v1.7
101
+
102
+ # megatron core v0.4.0: clone and apply the patch
103
+ # You can also get the patched Megatron code patch via
104
+ # git clone -b core_v0.4.0_verl https://github.com/eric-haibin-lin/Megatron-LM
105
+ cd ..
106
+ git clone -b core_v0.4.0 https://github.com/NVIDIA/Megatron-LM.git
107
+ cd Megatron-LM
108
+ cp ../verl/patches/megatron_v4.patch .
109
+ git apply megatron_v4.patch
110
+ pip3 install -e .
111
+ export PYTHONPATH=$PYTHONPATH:$(pwd)
112
+
113
+
114
+ .. [1] Megatron v0.4 is supported with verl's patches to fix issues such as virtual pipeline hang. It will soon be updated to the latest version of upstream Megatron-LM without patches.
deep_search/DeepResearcher/docs/start/quickstart.rst ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _quickstart:
2
+
3
+ =========================================================
4
+ Quickstart: PPO training on GSM8K dataset
5
+ =========================================================
6
+
7
+ Post-train a LLM using GSM8K dataset.
8
+
9
+ Introduction
10
+ ------------
11
+
12
+ .. _hf_dataset_gsm8k: https://huggingface.co/datasets/gsm8k
13
+
14
+ In this example, we train an LLM to tackle the `GSM8k <hf_dataset_gsm8k>`_ task with function-based rewards. [1]_
15
+
16
+ Prerequisite:
17
+
18
+ - the latest version of ``verl`` and its dependencies installed following the installation guide. Using the docker image is recommended.
19
+
20
+ - a GPU with at least 24 GB HBM
21
+
22
+
23
+ Dataset Introduction
24
+ --------------------
25
+
26
+ GSM8k is a math problem dataset. The prompt is an elementary school
27
+ problem. The LLM model is asked to solve the math problem. Below is an example:
28
+
29
+ Prompt
30
+
31
+ Katy makes coffee using teaspoons of sugar and cups of water in the
32
+ ratio of 7:13. If she used a total of 120 teaspoons of sugar and cups
33
+ of water, calculate the number of teaspoonfuls of sugar she used.
34
+
35
+ Solution
36
+
37
+ The total ratio representing the ingredients she used to make the
38
+ coffee is 7+13 = <<7+13=20>>20 Since the fraction representing the
39
+ number of teaspoons she used is 7/20, she used 7/20\ *120 =
40
+ <<7/20*\ 120=42>>42 #### 42
41
+
42
+ Step 1: Prepare the dataset
43
+ ----------------------------
44
+
45
+ We preprocess the dataset in parquet format so that (1) it contains necessary fields for computing RL rewards and (2) is faster to read.
46
+
47
+ .. code-block:: bash
48
+
49
+ python3 examples/data_preprocess/gsm8k.py --local_dir ~/data/gsm8k
50
+
51
+ Step 2: Download a model for post-training
52
+ -------------------------------------------
53
+
54
+ In this example, we start with the ``Qwen2.5-0.5B-Instruct`` model.
55
+
56
+ If you want to perform SFT before RL, refer to the :doc:`Complete GSM8K Example<../examples/gsm8k_example>`, the `sft directory <https://github.com/volcengine/verl/blob/main/examples/sft/gsm8k>`_ and `SFT Trainer <https://github.com/volcengine/verl/blob/main/verl/trainer/fsdp_sft_trainer.py>`_ for further details.
57
+
58
+ .. code-block:: bash
59
+
60
+ python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2.5-0.5B-Instruct')"
61
+
62
+ Step 3: Perform PPO training with the instruct model
63
+ ----------------------------------------------------------------------
64
+
65
+ **Reward Model/Function**
66
+
67
+ We use a pre-defined rule-based reward model. We force the model to produce a final
68
+ answer following 4 “#” as shown in the solution. We extract the final
69
+ answer from both the solution and model's output using regular
70
+ expression matching. We assign a reward of 1 to correct
71
+ answer, 0.1 to incorrect answer and 0 to no answer.
72
+
73
+ For more details, please refer to `verl/utils/reward_score/gsm8k.py <https://github.com/volcengine/verl/blob/v0.1/verl/utils/reward_score/gsm8k.py>`_.
74
+
75
+ **Training Script**
76
+
77
+ Now let's run PPO training with the dataset and model above. [2]_
78
+
79
+
80
+ Set the ``data.train_files`` ,\ ``data.val_files``, ``actor_rollout_ref.model.path`` and ``critic.model.path`` based on your dataset and model names or paths.
81
+
82
+ .. code-block:: bash
83
+
84
+ PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
85
+ data.train_files=$HOME/data/gsm8k/train.parquet \
86
+ data.val_files=$HOME/data/gsm8k/test.parquet \
87
+ data.train_batch_size=256 \
88
+ data.max_prompt_length=512 \
89
+ data.max_response_length=256 \
90
+ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
91
+ actor_rollout_ref.actor.optim.lr=1e-6 \
92
+ actor_rollout_ref.actor.ppo_mini_batch_size=64 \
93
+ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
94
+ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
95
+ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
96
+ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
97
+ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
98
+ critic.optim.lr=1e-5 \
99
+ critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
100
+ critic.ppo_micro_batch_size_per_gpu=4 \
101
+ algorithm.kl_ctrl.kl_coef=0.001 \
102
+ trainer.logger=['console'] \
103
+ +trainer.val_before_train=False \
104
+ trainer.default_hdfs_dir=null \
105
+ trainer.n_gpus_per_node=1 \
106
+ trainer.nnodes=1 \
107
+ trainer.save_freq=10 \
108
+ trainer.test_freq=10 \
109
+ trainer.total_epochs=15 2>&1 | tee verl_demo.log
110
+
111
+ You are expected to see the following logs, indicating training in progress. The key metric ``val/test_score/openai/gsm8k`` is computed every ``trainer.test_freq`` steps:
112
+
113
+ .. code-block:: bash
114
+
115
+ step:0 - timing/gen:21.470 - timing/ref:4.360 - timing/values:5.800 - critic/kl:0.000 - critic/kl_coeff:0.001 - timing/adv:0.109 - timing/update_critic:15.664 - critic/vf_loss:14.947 - critic/vf_clipfrac:0.000 - critic/vpred_mean:-2.056 - critic/grad_norm:1023.278 - critic/lr(1e-4):0.100 - timing/update_actor:20.314 - actor/entropy_loss:0.433 - actor/pg_loss:-0.005 - actor/pg_clipfrac:0.000 - actor/ppo_kl:0.000 - actor/grad_norm:1.992 - actor/lr(1e-4):0.010 - critic/score/mean:0.004 - critic/score/max:1.000 - critic/score/min:0.000 - critic/rewards/mean:0.004 - critic/rewards/max:1.000 - critic/rewards/min:0.000 - critic/advantages/mean:-0.000 - critic/advantages/max:2.360 - critic/advantages/min:-2.280 - critic/returns/mean:0.003 - critic/returns/max:0.000 - critic/returns/min:0.000 - critic/values/mean:-2.045 - critic/values/max:9.500 - critic/values/min:-14.000 - response_length/mean:239.133 - response_length/max:256.000 - response_length/min:77.000 - prompt_length/mean:104.883 - prompt_length/max:175.000 - prompt_length/min:68.000
116
+ step:1 - timing/gen:23.020 - timing/ref:4.322 - timing/values:5.953 - critic/kl:0.000 - critic/kl_coeff:0.001 - timing/adv:0.118 - timing/update_critic:15.646 - critic/vf_loss:18.472 - critic/vf_clipfrac:0.384 - critic/vpred_mean:1.038 - critic/grad_norm:942.924 - critic/lr(1e-4):0.100 - timing/update_actor:20.526 - actor/entropy_loss:0.440 - actor/pg_loss:0.000 - actor/pg_clipfrac:0.002 - actor/ppo_kl:0.000 - actor/grad_norm:2.060 - actor/lr(1e-4):0.010 - critic/score/mean:0.000 - critic/score/max:0.000 - critic/score/min:0.000 - critic/rewards/mean:0.000 - critic/rewards/max:0.000 - critic/rewards/min:0.000 - critic/advantages/mean:0.000 - critic/advantages/max:2.702 - critic/advantages/min:-2.616 - critic/returns/mean:0.000 - critic/returns/max:0.000 - critic/returns/min:0.000 - critic/values/mean:-2.280 - critic/values/max:11.000 - critic/values/min:-16.000 - response_length/mean:232.242 - response_length/max:256.000 - response_length/min:91.000 - prompt_length/mean:102.398 - prompt_length/max:185.000 - prompt_length/min:70.000
117
+
118
+ Checkout :ref:`algo-baseline-page` for full training and validation logs for reference.
119
+
120
+ The checkpoint is saved at the following dir by default: ``checkpoints/${trainer.project_name}/${trainer.experiment_name}``
121
+
122
+ To enable ``wandb`` for experiment tracking, set the following configs:
123
+
124
+ .. code-block:: bash
125
+
126
+ trainer.logger=['console','wandb'] \
127
+ trainer.project_name=$YOUR_PROJECT_NAME \
128
+ trainer.experiment_name=$YOUR_RUN_NAME \
129
+
130
+ If you encounter out of memory issues with HBM less than 32GB, enable the following configs would help:
131
+
132
+ .. code-block:: bash
133
+
134
+ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \
135
+ critic.ppo_micro_batch_size_per_gpu=1 \
136
+
137
+ For the full set of configs, please refer to :ref:`config-explain-page` for detailed explanation and performance tuning.
138
+
139
+
140
+ .. [1] The original paper (https://arxiv.org/pdf/2110.14168) mainly focuses on training a verifier (a reward model) to solve math problems via Best-of-N sampling. In this example, we train an RL agent using a rule-based reward model.
141
+ .. [2] More training script examples for FSDP and Megatron-LM backend are stored in `examples/ppo_trainer <https://github.com/volcengine/verl/tree/main/examples/ppo_trainer>`_ directory.
deep_search/DeepResearcher/docs/workers/fsdp_workers.rst ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ PyTorch FSDP Backend
2
+ ======================
3
+
4
+ We support PyTorch FSDP Backend by implementing various workers for
5
+ actor, critic, reference, rollout and reward models. We also implement
6
+ the ``FSDPVLLMShardingManager`` that reshard weight between FSDP and
7
+ vLLM in `fsdp_vllm.py <https://github.com/volcengine/verl/blob/main/verl/workers/sharding_manager/fsdp_vllm.py>`_.
8
+
9
+ **Pros**
10
+
11
+ - Readily support various models.
12
+
13
+ - Users only need to implement the corresponding
14
+ ``dtensor_weight_loader`` for weight synchronization between FSDP
15
+ and vLLM. While for ``hf_weight_loader``, users can directly apply
16
+ any models supported both in HF and vLLM without any code change.
17
+
18
+ - Easy to organize the forward and backward computation for each model.
19
+
20
+ **Cons**
21
+
22
+ - Poor scalability when it comes to large-scale models (e.g. Llama 70B
23
+ and 405B)
24
+ - The resharding overhead between actor and rollout could be larger than
25
+ Megatron-LM backend.
26
+
27
+ Due to the simplicity, we recommend using FSDP backend for algorithm
28
+ research and prototyping.
29
+
30
+ FSDP Workers
31
+ --------------
32
+
33
+ ActorRolloutRefWorker
34
+ ^^^^^^^^^^^^^^^^^^^^^
35
+
36
+ Actor/Rollout HybridEngine
37
+ ''''''''''''''''''''''''''
38
+
39
+ 1. HybridEngine, Actor and Rollout initialization API.
40
+
41
+ .. code:: python
42
+
43
+ @register(dispatch_mode=Dispatch.ONE_TO_ALL)
44
+ def init_model(self):
45
+
46
+ ``ONE_TO_ALL``: when calling the ``init_model`` function from the driver
47
+ process, each worker (on a GPU) will execute the following model
48
+ initialization process.
49
+
50
+ The initialization details of HybridEngine, Actor and Rollout are
51
+ highlighted below:
52
+
53
+ 1. ``DataParallelPPOActor`` implements the simple PPO computation logics
54
+ when the model is built with FSDP, including compute log prob, model
55
+ update.
56
+ 2. ``vLLMRollout`` support generation with vLLM. We modify the vLLM
57
+ Engine and make it executed under SPMD to fit into our
58
+ ``WorkerGroup`` design.
59
+ 3. ``FSDPVLLMShardingManager`` a context manager to perform actual
60
+ resharding between actor and rollout.
61
+
62
+ See `source code <https://github.com/volcengine/verl/blob/main/verl/workers/fsdp_workers.py>`_. for more information.
63
+
64
+ 2. Generate sequence and recompute log prob
65
+
66
+ .. code:: python
67
+
68
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
69
+ def generate_sequences(self, prompts: DataProto):
70
+
71
+ - ``Dispatch.DP_COMPUTE_PROTO``: The data will be dispatched and
72
+ collected along the DP dimension
73
+
74
+ - In this function, the rollout model will perform auto-regressive
75
+ generation and the actor model will recompute the old log prob for the
76
+ generated response.
77
+
78
+ 3. Update actor model
79
+
80
+ .. code:: python
81
+
82
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
83
+ def update_actor(self, data: DataProto):
84
+
85
+ - Update the actor model weight using PPO & entropy loss.
86
+
87
+ ReferenceModel
88
+ ''''''''''''''
89
+
90
+ 1. Reference model initialization
91
+
92
+ The reference model is initialized using the same function as the actor
93
+ model without initializing the HybridEngine and Optimizer. Then the
94
+ actor model is also wrapped by the ``DataParallelPPOActor``.
95
+
96
+ 2. Compute reference log prob
97
+
98
+ .. code:: python
99
+
100
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
101
+ def compute_ref_log_prob(self, data: DataProto):
102
+
103
+ - In this function, the reference model will call the compute log prob
104
+ function in ``DataParallelPPOActor`` to compute the reference log
105
+ prob.
106
+
107
+ CriticWorker and RewardWorker
108
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
109
+
110
+ 1. Model initialization
111
+
112
+ Quite similar to reference model. The CriticWorker will perform
113
+ additional initialization for the Optimizer.
114
+
115
+ 2. Compute Values for CriticWorker
116
+
117
+ .. code:: python
118
+
119
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
120
+ def compute_values(self, data: DataProto):
121
+
122
+ 3. Update Critic
123
+
124
+ .. code:: python
125
+
126
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
127
+ def update_critic(self, data: DataProto):
128
+
129
+ 4. Compute Reward
130
+
131
+ .. code:: python
132
+
133
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
134
+ def compute_rm_score(self, data: DataProto):
135
+
136
+
137
+ HybridShard
138
+ ------------
139
+
140
+ We didn't support FSDP `HybridShard`. To support this, we may need to
141
+ construct a 2D device mesh and test the corresponding
142
+ ``dtensor_weight_loader`` and ``hf_weight_loader`` for each model.
deep_search/DeepResearcher/docs/workers/megatron_workers.rst ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Megatron-LM Backend
2
+ =====================
3
+
4
+ We support Megatron Backend by implementing various workers for actor,
5
+ critic, reference, rollout and reward models. We also implement the
6
+ ``3DHybridEngine`` using Megatron-LM and vLLM in `megatron_vllm.py <https://github.com/volcengine/verl/blob/main/verl/workers/sharding_manager/megatron_vllm.py>`_.
7
+
8
+ **Pros**
9
+
10
+ - Support 3D parallelism and sequence parallelism for best scalability
11
+ and throughput.
12
+ - 3D HybridEngine can significantly reduce peak memory usage and reduce
13
+ weight synchronize overhead between actor and rollout.
14
+
15
+ **Cons**
16
+
17
+ - Users should implement their own models for Megatron-LM
18
+ - Users should implement the corresponding weight_loader to
19
+
20
+ - synchronize the model weight between actor (in Megatron) and rollout
21
+ (in vLLM).
22
+ - load weights from checkpoints to corresponding model in Megatron-LM
23
+
24
+ Megatron Workers
25
+ ----------------
26
+
27
+ MegatronWorker
28
+ ^^^^^^^^^^^^^^
29
+
30
+ ``MegatronWorker`` is the base class of different megatron worker
31
+ classes. In this class, ``get_megatron_global_info`` and
32
+ ``get_megatron_rank_info`` functions retrieve the 3D parallel world
33
+ size and rank of each ``Worker`` running on a specific GPU. This information
34
+ will be used in transfer protocol for Megatron Backend.
35
+
36
+ The following ``Worker`` class for different models will be utilized to
37
+ construct the ``WorkerGroup`` .
38
+
39
+ We implement various APIs for each ``Worker`` class decorated by the
40
+ ``@register(dispatch_mode=)`` . These APIs can be called by the ray
41
+ driver process. The data can be correctly collected and dispatched following
42
+ the ``dispatch_mode`` on each function. The supported dispatch_mode
43
+ (i.e., transfer protocols) can be found in `decorator.py <https://github.com/volcengine/verl/blob/main/verl/single_controller/base/decorator.py>`_.
44
+
45
+ ActorRolloutRefWorker
46
+ ^^^^^^^^^^^^^^^^^^^^^
47
+
48
+ This class is implemented for Actor/Rollout HybridEngine or for the
49
+ reference model to initialize their model and perform computation.
50
+
51
+ Actor/Rollout HybridEngine
52
+ ''''''''''''''''''''''''''
53
+
54
+ 1. HybridEngine, Actor and Rollout initialization API.
55
+
56
+ .. code:: python
57
+
58
+ @register(dispatch_mode=Dispatch.ONE_TO_ALL)
59
+ def init_model(self):
60
+
61
+ ``ONE_TO_ALL``: when calling the ``init_model`` function from the driver
62
+ process, each worker (on a GPU) will execute the following model
63
+ initialization process.
64
+
65
+ The initialization details of HybridEngine, Actor and Rollout are
66
+ highlighted below:
67
+
68
+ 1. ``AllGatherPPModel`` holds memory buffer for both Actor and Rollout
69
+ and support weight resharding between actor and rollout.
70
+ 2. ``MegatronPPOActor`` implements the simple PPO computation logics
71
+ when the model is built with Megatron, including compute log prob,
72
+ model update.
73
+ 3. ``vLLMRollout`` support generation with vLLM. We modify the vLLM
74
+ Engine and make it executed under SPMD to fit into our
75
+ ``WorkerGroup`` design.
76
+ 4. ``MegatronVLLMShardingManager`` a context manager to perform actual
77
+ resharding between actor and rollout.
78
+
79
+ See `source code <https://github.com/volcengine/verl/blob/main/verl/workers/megatron_workers.py#L63>`_ for more information.
80
+
81
+ .. code:: python
82
+
83
+ # Initialize the 3D HybridEngine
84
+ hybrid_engine = AllGatherPPModel(model_provider=megatron_actor_model_provider)
85
+ # Fetch the model at current rank
86
+ actor_module = hybrid_engine.this_rank_models
87
+ ...
88
+
89
+ # build actor model
90
+ self.actor = MegatronPPOActor(config=self.config.actor,
91
+ model_config=self.actor_model_config,
92
+ megatron_config=megatron_config,
93
+ actor_module=self.actor_module,
94
+ actor_optimizer=self.actor_optimizer,
95
+ actor_optimizer_config=self.actor_optim_config)
96
+
97
+ # build rollout
98
+ # rollout initialization
99
+ rollout = vLLMRollout(actor_module=params,
100
+ config=self.config.rollout,
101
+ tokenizer=self.tokenizer,
102
+ model_hf_config=self.actor_model_config,
103
+ train_tp=mpu.get_tensor_model_parallel_world_size())
104
+ # perform weight resharding between actor and rollout
105
+ sharding_manager = MegatronVLLMShardingManager(module=self.hybrid_engine,
106
+ inference_engine=rollout.inference_engine,
107
+ model_config=self.actor_model_config,
108
+ layer_name_mapping=layer_name_mapping)
109
+ ...
110
+
111
+ 2. Generate sequence and recompute log prob
112
+
113
+ .. code:: python
114
+
115
+ @register(dispatch_mode=Dispatch.MEGATRON_PP_AS_DP_PROTO)
116
+ def generate_sequences(self, prompts: DataProto):
117
+
118
+ - ``Dispatch.MEGATRON_PP_AS_DP_PROTO``: The PP dimension of the actor
119
+ model will be regarded as DP dimension. Then the driver process will
120
+ dispatch and collect the data according to this reorganization. This
121
+ is because, in HybridEngine, the actor weight, which usually applied
122
+ larger 3D parallel sizes, will be gathered along the PP dimension and
123
+ TP dimension. Therefore, the corresponding data should be dispatched
124
+ and collected through the 3D parallel group of the rollout model,
125
+ rather than the actor model. However, the world_size and rank
126
+ information can only be retrieved from ``get_megatron_global_info`` and
127
+ ``get_megatron_rank_info``, which records the 3D information for the
128
+ actor model. Moreover, the data resharding inside TP dimension will be
129
+ processed within the HybridEngine.
130
+
131
+ - In this function, the rollout model will perform auto-regressive
132
+ generation and the actor model will recompute the old log prob for the
133
+ generated response.
134
+
135
+ 3. Update actor model
136
+
137
+ .. code:: python
138
+
139
+ @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
140
+ def update_actor(self, data: DataProto):
141
+
142
+ - ``Dispatch.MEGATRON_COMPUTE_PROTO``: User passes the data partitioned
143
+ by DP dimension. The data is dispatched to all tp/pp ranks within the
144
+ same dp group, and ultimately only collects output data from tp=0 and
145
+ the last pp.
146
+ - Update the actor model weight using PPO & entropy loss.
147
+
148
+ ReferenceModel
149
+ ''''''''''''''
150
+
151
+ 1. Reference model initialization
152
+
153
+ The reference model is initialized using the same function as the actor
154
+ model without initializing the HybridEngine and Optimizer. Then the
155
+ actor model is also wrapped by the ``MegatronPPOActor``.
156
+
157
+ 2. Compute reference log prob
158
+
159
+ .. code:: python
160
+
161
+ @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
162
+ def compute_ref_log_prob(self, data: DataProto):
163
+
164
+ - In this function, the reference model will call the compute log prob
165
+ function in ``MegatronPPOActor`` to compute the reference log prob.
166
+
167
+ CriticWorker and RewardWorker
168
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
169
+
170
+ 1. Model initialization
171
+
172
+ Quite similar to reference model. The CriticWorker will perform
173
+ additional initialization for the Optimizer.
174
+
175
+ 2. Compute Values for CriticWorker
176
+
177
+ .. code:: python
178
+
179
+ @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
180
+ def compute_values(self, data: DataProto):
181
+
182
+ 3. Update Critic
183
+
184
+ .. code:: python
185
+
186
+ @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
187
+ def update_critic(self, data: DataProto):
188
+
189
+ 4. Compute Reward
190
+
191
+ .. code:: python
192
+
193
+ @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
194
+ def compute_rm_score(self, data: DataProto):
195
+
196
+ Context Parallel
197
+ ----------------
198
+
199
+ This require the developer/contributor to implement the context parallel
200
+ both in Megatron-LM and models.
deep_search/DeepResearcher/docs/workers/ray_trainer.rst ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ PPO Ray Trainer
2
+ ===============
3
+
4
+ We implement the RayPPOTrainer, a trainer that runs on the driver
5
+ process on a single CPU/GPU node (default is CPU).
6
+
7
+ The PPORayTrainer includes 3 core functions for data preparation,
8
+ WorkerGroup initialization and PPO training loop.
9
+
10
+ Data Preparation
11
+ ----------------
12
+
13
+ The ``PPORayTrainer``, as a single process, is responsible for loading a
14
+ complete batch of samples (prompts) from the dataset and then dispatch
15
+ to different worker_groups running on different GPUs.
16
+
17
+ To generalize the data loading, we implement the ``RLHFDataset`` class
18
+ to load the preprocessed parquet files, apply chat templates to the
19
+ prompts, add padding, truncate prompts that exceed max prompt length and
20
+ then tokenize.
21
+
22
+ .. code:: python
23
+
24
+ self.train_dataset = RLHFDataset(parquet_files=self.config.data.train_files,
25
+ tokenizer=self.tokenizer,
26
+ prompt_key=self.config.data.prompt_key,
27
+ max_prompt_length=self.config.data.max_prompt_length,
28
+ filter_prompts=True,
29
+ return_raw_chat=self.config.data.get('return_raw_chat', False),
30
+ truncation='error')
31
+
32
+ Then, the dataloader will iterate the dataset under PPO mini batch size.
33
+
34
+ WorkerGroup Initialization
35
+ --------------------------
36
+
37
+ We first introduce a basic implementation of initializing the
38
+ ``WorkerGroup`` of the actor model on a given set of GPUs.
39
+
40
+ .. code:: python
41
+
42
+ # max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool
43
+ # For FSDP backend, we recommend using max_colocate_count=1 that merge all WorkerGroups into one.
44
+ # For Megatron backend, we recommend using max_colocate_count>1 that can utilize different WorkerGroup for different models
45
+ resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes,
46
+ use_gpu=True,
47
+ max_colocate_count=1)
48
+ # define actor rollout cls to be init on remote
49
+ actor_rollout_cls = RayClassWithInitArgs(cls=ActorRolloutWorker)
50
+ # define actor_rollout worker group
51
+ actor_rollout_worker_group = MegatronRayWorkerGroup(resource_pool=resource_pool,
52
+ ray_cls_with_init=actor_rollout_cls,
53
+ default_megatron_kwargs=config.actor_rollout.megatron)
54
+
55
+ Different WorkerGroups, like ``actor_rollout_worker_group`` ,
56
+ ``critic_worker_group`` and ``ref_worker_group`` lie in a separate
57
+ process in the above implementation.
58
+
59
+ The driver process can then call the distributed compute function within
60
+ the ``actor_rollout_worker_group`` and other roles to construct the RL
61
+ training loop.
62
+
63
+ For models colocated in the same set of GPUs, we further provide a
64
+ fine-grained optimization, which merges the ``worker_group`` of different roles
65
+ in the same process. This optimization can save the redundant
66
+ CUDA/distributed context in different processes.
67
+
68
+ .. code:: python
69
+
70
+ # initialize WorkerGroup
71
+ # NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
72
+ # you should not use `create_colocated_worker_cls`. Instead, directly pass different resource pool to different worker groups.
73
+ # See TODO(url) for more information.
74
+ all_wg = {}
75
+ for resource_pool, class_dict in self.resource_pool_to_cls.items():
76
+ worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
77
+ wg_dict = self.ray_worker_group_cls(resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls)
78
+ spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
79
+ all_wg.update(spawn_wg)
80
+
81
+ if self.use_critic:
82
+ self.critic_wg = all_wg['critic']
83
+ self.critic_wg.init_model()
84
+
85
+ if self.use_reference_policy:
86
+ self.ref_policy_wg = all_wg['ref']
87
+ self.ref_policy_wg.init_model()
88
+
89
+ if self.use_rm:
90
+ self.rm_wg = all_wg['rm']
91
+ self.rm_wg.init_model()
92
+
93
+ # we should create rollout at the end so that vllm can have a better estimation of kv cache memory
94
+ self.actor_rollout_wg = all_wg['actor_rollout']
95
+ self.actor_rollout_wg.init_model()
96
+
97
+ .. note:: For megatron backend, if we merge the ``worker_groups`` into the same processes, all the roles will utilize the same 3D parallel size. To optimize this, we may need to maintain several 3D process groups for each role in the same distributed context. If you want to use different 3D parallel size for different roles, please follow the similar architecture of the first code block to initialize each role's ``worker_group``
98
+
99
+
100
+ PPO Training Loop
101
+ -----------------
102
+
103
+ We implement the PPO training loop by calling the functions in
104
+ worker_group of each role. The input and output data of each function is
105
+ a ``DataProto`` object implemented in `protocol.py <https://github.com/volcengine/verl/blob/main/verl/protocol.py>`_. In the training
106
+ loop, trainer will dispatch/collect the data to/from different GPUs
107
+ following the transfer protocols wrapped in the workers' functions. The
108
+ computation of PPO micro batches is processed in ``update_actor`` and
109
+ ``update_critic`` functions.
110
+
111
+ To extend to other RLHF algorithms, such as DPO, GRPO, please refer to
112
+ :doc:`../advance/dpo_extension`.
113
+
114
+ .. code:: python
115
+
116
+ def fit(self):
117
+ """
118
+ The training loop of PPO.
119
+ The driver process only need to call the compute functions of the worker group through RPC to construct the PPO dataflow.
120
+ The light-weight advantage computation is done on the driver process.
121
+ """
122
+ from verl.utils.tracking import Tracking
123
+ from omegaconf import OmegaConf
124
+
125
+ logger = Tracking(project_name=self.config.trainer.project_name,
126
+ experiment_name=self.config.trainer.experiment_name,
127
+ default_backend=self.config.trainer.logger,
128
+ config=OmegaConf.to_container(self.config, resolve=True))
129
+
130
+ global_steps = 0
131
+
132
+ # perform validation before training
133
+ # currently, we only support validation using the reward_function.
134
+ if self.val_reward_fn is not None:
135
+ val_metrics = self._validate()
136
+ pprint(f'Initial validation metrics: {val_metrics}')
137
+
138
+ for epoch in range(self.config.trainer.total_epochs):
139
+ for batch_dict in self.train_dataloader:
140
+ metrics = {}
141
+
142
+ batch: DataProto = DataProto.from_single_dict(batch_dict)
143
+ # batch = batch.to('cuda')
144
+
145
+ # pop those keys for generation
146
+ gen_batch = batch.pop(batch_keys=['input_ids', 'attention_mask', 'position_ids'])
147
+
148
+ # generate a batch
149
+ with Timer(name='gen', logger=None) as timer:
150
+ gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
151
+ metrics['timing/gen'] = timer.last
152
+
153
+ batch = batch.union(gen_batch_output)
154
+
155
+ if self.use_reference_policy:
156
+ # compute reference log_prob
157
+ with Timer(name='ref', logger=None) as timer:
158
+ ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
159
+ batch = batch.union(ref_log_prob)
160
+ metrics['timing/ref'] = timer.last
161
+
162
+ # compute values
163
+ with Timer(name='values', logger=None) as timer:
164
+ values = self.critic_wg.compute_values(batch)
165
+ batch = batch.union(values)
166
+ metrics['timing/values'] = timer.last
167
+
168
+ with Timer(name='adv', logger=None) as timer:
169
+ # compute scores. Support both model and function-based.
170
+ # We first compute the scores using reward model. Then, we call reward_fn to combine
171
+ # the results from reward model and rule-based results.
172
+ if self.use_rm:
173
+ # we first compute reward model score
174
+ reward_tensor = self.rm_wg.compute_rm_score(batch)
175
+ batch = batch.union(reward_tensor)
176
+
177
+ # we combine with rule-based rm
178
+ reward_tensor = self.reward_fn(batch)
179
+ batch.batch['token_level_scores'] = reward_tensor
180
+
181
+ # compute rewards. apply_kl_penalty if available
182
+ batch, kl_metrics = apply_kl_penalty(batch,
183
+ kl_ctrl=self.kl_ctrl,
184
+ kl_penalty=self.config.algorithm.kl_penalty)
185
+ metrics.update(kl_metrics)
186
+
187
+ # compute advantages, executed on the driver process
188
+ batch = compute_advantage(batch,
189
+ self.config.algorithm.gamma,
190
+ self.config.algorithm.lam,
191
+ adv_estimator=self.config.algorithm.adv_estimator)
192
+ metrics['timing/adv'] = timer.last
193
+
194
+ # update critic
195
+ if self.use_critic:
196
+ with Timer(name='update_critic', logger=None) as timer:
197
+ critic_output = self.critic_wg.update_critic(batch)
198
+ metrics['timing/update_critic'] = timer.last
199
+ critic_output_metrics = reduce_metrics(critic_output.meta_info['metrics'])
200
+ metrics.update(critic_output_metrics)
201
+
202
+ # implement critic warmup
203
+ if self.config.trainer.critic_warmup <= global_steps:
204
+ # update actor
205
+ with Timer(name='update_actor', logger=None) as timer:
206
+ actor_output = self.actor_rollout_wg.update_actor(batch)
207
+ metrics['timing/update_actor'] = timer.last
208
+ actor_output_metrics = reduce_metrics(actor_output.meta_info['metrics'])
209
+ metrics.update(actor_output_metrics)
210
+
211
+ # validate
212
+ if self.val_reward_fn is not None and (global_steps + 1) % self.config.trainer.test_freq == 0:
213
+ with Timer(name='testing', logger=None) as timer:
214
+ val_metrics: dict = self._validate()
215
+ val_metrics = {f'val/{key}': val for key, val in val_metrics.items()}
216
+ metrics['timing/testing'] = timer.last
217
+ metrics.update(val_metrics)
218
+
219
+ # collect metrics
220
+ data_metrics = compute_data_metrics(batch=batch)
221
+ metrics.update(data_metrics)
222
+
223
+ # TODO: make a canonical logger that supports various backend
224
+ logger.log(data=metrics, step=global_steps)
225
+
226
+ if self.config.trainer.save_freq > 0 and (global_steps + 1) % self.config.trainer.save_freq == 0:
227
+ actor_local_path = os.path.join(self.config.trainer.default_local_dir, 'actor',
228
+ f'global_step_{global_steps}')
229
+ actor_remote_path = os.path.join(self.config.trainer.default_hdfs_dir, 'actor')
230
+ self.actor_rollout_wg.save_checkpoint(actor_local_path, actor_remote_path)
231
+
232
+ if self.use_critic:
233
+ critic_local_path = os.path.join(self.config.trainer.default_local_dir, 'critic',
234
+ f'global_step_{global_steps}')
235
+ critic_remote_path = os.path.join(self.config.trainer.default_hdfs_dir, 'critic')
236
+ self.critic_wg.save_checkpoint(critic_local_path, critic_remote_path)
237
+
238
+ global_steps += 1
239
+
240
+ # perform validation after training
241
+ if self.val_reward_fn is not None:
242
+ val_metrics = self._validate()
243
+ pprint(f'Final validation metrics: {val_metrics}')
deep_search/DeepResearcher/evaluate/cacluate_metrics.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import argparse
3
+ import os
4
+ import random
5
+ import time
6
+ from tqdm import tqdm
7
+ import re
8
+ import pandas as pd
9
+ import string
10
+ import sys
11
+
12
+ from openai.types import Completion as OpenAICompletion
13
+ from openai import RateLimitError as OpenAIRateLimitError
14
+ from openai import APIError as OpenAIAPIError
15
+ from openai import Timeout as OpenAITimeout
16
+
17
+ import requests
18
+
19
def call_gpt_4o_mini(prompt):
    """Send *prompt* to the gpt-4o-mini chat-completions endpoint and return the reply text.

    On a non-200 HTTP response, returns an "Error <status>: <body>" string
    instead of raising, so callers can retry on the textual result.
    """
    endpoint = "YOUR API BASE URL"
    auth_headers = {
        "Authorization": "Bearer YOUR API KEY",
        "Content-Type": "application/json"
    }
    payload = {
        "model": "gpt-4o-mini",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.7
    }

    resp = requests.post(endpoint, headers=auth_headers, json=payload)

    if resp.status_code != 200:
        return f"Error {resp.status_code}: {resp.text}"
    return resp.json()["choices"][0]["message"]["content"]
41
+
42
+
43
def check_tags_balance(solution_str: str) -> bool:
    """Return True iff every <tool_call>, <think> and <answer> tag is properly paired.

    For each tag name, the number of opening and closing tags must match,
    and scanning left to right every opening tag must be followed by a
    matching closing tag (so a closer cannot precede its opener).
    """
    for name in ('tool_call', 'think', 'answer'):
        opener = f"<{name}>"
        closer = f"</{name}>"

        # Counts must agree before we bother checking ordering.
        if solution_str.count(opener) != solution_str.count(closer):
            return False

        # Walk the string: each opener must be followed by a closer.
        cursor = -1
        while (opening := solution_str.find(opener, cursor + 1)) != -1:
            closing = solution_str.find(closer, opening)
            if closing == -1:
                return False
            cursor = closing

    return True
81
+
82
# Translation table mapping every ASCII punctuation character to a space.
_PUNCT_TO_SPACE = str.maketrans({ch: ' ' for ch in string.punctuation})


def preprocess_text(text: str) -> str:
    """Normalize *text* for answer matching.

    Every ASCII punctuation character is replaced by a space, runs of
    whitespace are collapsed to a single space, and the result is stripped.
    (Case is NOT changed here; callers lowercase beforehand.)
    """
    spaced = text.translate(_PUNCT_TO_SPACE)
    collapsed = re.sub(r'\s+', ' ', spaced)
    return collapsed.strip()
100
+
101
# LLM-judge prompt template for model-based evaluation (MBE).  The
# {question}/{gt_answer}/{pred_answer} slots are filled via str.replace
# (see get_mbe_result) rather than str.format, because the literal JSON
# braces below would break .format.
PROMPT='''You will be given a question and its ground truth answer list where each item can be a ground truth answer. Provided a pred_answer, you need to judge if the pred_answer correctly answers the question based on the ground truth answer list.
You should first give your rationale for the judgement, and then give your judgement result (i.e., correct or incorrect).

Here is the criteria for the judgement:
1. The pred_answer doesn't need to be exactly the same as any of the ground truth answers, but should be semantically same for the question.
2. Each item in the ground truth answer list can be viewed as a ground truth answer for the question, and the pred_answer should be semantically same to at least one of them.

question: {question}
ground truth answers: {gt_answer}
pred_answer: {pred_answer}

The output should in the following json format:

The output should in the following json format:
\'\'\'json
{
\"rationale\": \"your rationale for the judgement, as a text\",
\"judgement\": \"your judgement result, can only be \'correct\' or \'incorrect\'\"
}
\'\'\'
Your output:
'''
123
+
124
def get_json(json_str):
    """Extract and parse the first ``{...}`` JSON object embedded in *json_str*.

    Uses a greedy, DOTALL regex to grab the span between the first ``{`` and
    the last ``}``, then parses it.  Returns the parsed object, or ``{}``
    when no braces are present or the span is not valid JSON.
    """
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt/
    # SystemExit) to the failures this best-effort parse can actually hit.
    try:
        match = re.search(r"\{.*\}", json_str, re.DOTALL)
        if match:
            return json.loads(match.group())
        return {}
    except (TypeError, json.JSONDecodeError):
        return {}
139
+
140
def get_mbe_result(question, gts, pred_answer):
    """Model-based evaluation: ask the LLM judge whether *pred_answer*
    answers *question* given the ground-truth list *gts*.

    Returns 1.0 for a "correct" judgement, 0.0 for "incorrect" or once the
    retry budget is exhausted.
    """
    # Fill the template via str.replace — PROMPT contains literal JSON
    # braces, so str.format would raise.  Hoisted out of the loop: the
    # prompt is identical on every retry.
    prompt = (PROMPT.replace("{question}", question)
                    .replace("{gt_answer}", str(gts))
                    .replace("{pred_answer}", pred_answer))

    # BUGFIX: the original only incremented its retry counter inside
    # `except`, so a judge response that parsed but lacked a valid
    # "correct"/"incorrect" judgement looped forever.  Count every attempt.
    max_attempts = 101
    for _ in range(max_attempts):
        try:
            batch_responses = call_gpt_4o_mini(prompt)
            parsed = get_json(batch_responses)
            print(parsed)
            judgement = parsed.get("judgement")
            if judgement == "correct":
                return 1.0
            if judgement == "incorrect":
                return 0.0
            # Malformed / unexpected judge output: fall through and retry.
        except Exception:
            pass  # network or parse error: retry
    return 0.0
160
+
161
def compute_score(question,solution_str, ground_truth, val_type='f1',cot=False) -> float:
    """Score a model response against the ground truth for one question.

    val_type selects the metric: 'f1' (token-level F1), 'em' (exact match)
    or 'mbe' (model-based evaluation via an LLM judge).  Multiple reference
    answers are separated by "<|answer_split|>".  With cot=True, "</answer>"
    is appended and only the text after the last "<|im_start|>assistant"
    marker is scored.  Returns -0.0 on format errors (unbalanced tags or a
    missing <answer> block).
    """
    solution_str = solution_str.lower()
    ground_truth = ground_truth.lower()
    ground_truths = ground_truth.split("<|answer_split|>")
    # First check whether the tags are correctly paired (format check).
    if cot == True:
        solution_str = solution_str + "</answer>"
        solution_str = solution_str.split("<|im_start|>assistant")[-1]
    if not check_tags_balance(solution_str):
        return -0.0
    # Use a regex to extract the content of the first <answer> tag.
    try:
        answer_match = re.search(r'<answer>(.*?)</answer>', solution_str, re.DOTALL)
        if answer_match:
            answer_content = answer_match.group(1).strip()
            # Preprocess the extracted answer (punctuation / whitespace).
            answer_content = preprocess_text(answer_content)
        else:
            return -0.0  # no <answer> tag: treat as a format error
    except Exception as e:
        print(f"Error extracting answer content: {e}")
        return -0.0

    max_score = 0.0

    for gt in ground_truths:
        # Preprocess the ground-truth answer the same way.
        gt = preprocess_text(gt)

        if val_type == 'em' or val_type == "mbe":
            # Exact match short-circuits (mbe also returns early on a hit).
            if gt == answer_content:
                return 1.0
        else:
            # Tokenize prediction and reference into word sets.
            pred_tokens = set(answer_content.split())
            gt_tokens = set(gt.split())

            if not gt_tokens:  # avoid division by zero
                continue
            if not pred_tokens:
                continue

            # Tokens common to prediction and reference.
            common_tokens = pred_tokens & gt_tokens

            # Precision and recall over the token sets.
            precision = len(common_tokens) / len(pred_tokens) if pred_tokens else 0
            recall = len(common_tokens) / len(gt_tokens) if gt_tokens else 0

            # F1 score; keep the best across all references.
            if precision + recall > 0:  # avoid division by zero
                f1 = 2 * (precision * recall) / (precision + recall)
                max_score = max(max_score, f1)
    # No exact match found: fall back to the LLM judge for mbe.
    if val_type == "mbe":
        max_score = get_mbe_result(question,ground_truths,answer_content)


    return max_score
219
+
220
# --- Script entry: load ground truths and this method's predictions ---
method = sys.argv[1]  # evaluation method name, e.g. "rag", "cot", "search_r1"
file_path = "../data/test.parquet"
df = pd.read_parquet(file_path)
# Ground-truth records as plain dicts (via a JSON round-trip of the frame).
gts = json.loads(df.to_json(orient="records", force_ascii=False))


with open(f"./{method}_result.json","r",encoding="utf-8") as f:
    answers = json.load(f)
result = {}
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
from collections import defaultdict

# Per-data-source score lists; averaged after all workers finish.
result = defaultdict(lambda: {"f1": [], "em": [], "mbe": []})
234
+
235
def compute_metrics(gt, answer, method):
    """Score one (ground-truth, prediction) pair under f1 / em / mbe.

    Returns a ``(data_source, f1, em, mbe)`` tuple for the record.
    """
    question = gt["prompt"][0]["content"]
    gt_answer = gt["reward_model"]["ground_truth"]
    data_source = gt["data_source"]

    if method in ["rag", "cot"]:
        # Plain chat-style outputs: score the raw response in CoT mode.
        pred_text, use_cot = answer["response"], True
    elif method in ["search_r1_wo_ir", "search_r1"]:
        # These result files carry their own metadata; prefer it.
        data_source = answer["data_source"]
        question = answer["question"]
        gt_answer = answer["gt_answer"]
        pred_text, use_cot = answer["r1_answer"], False
    elif method in ["r1_searcher"]:
        data_source = answer["data_source"]
        question = answer["question"]
        gt_answer = answer["answer"]
        # Wrap the bare prediction so the scorer can find an <answer> tag.
        pred_text, use_cot = f"<answer>{answer['pred_ans']}</answer>", False
    else:
        pred_text, use_cot = answer["message_str"], False

    f1 = compute_score(question, pred_text, gt_answer, "f1", cot=use_cot)
    em = compute_score(question, pred_text, gt_answer, "em", cot=use_cot)
    mbe = compute_score(question, pred_text, gt_answer, "mbe", cot=use_cot)

    return data_source, f1, em, mbe
265
+
266
# Fan out scoring across a thread pool (the mbe path waits on the LLM judge).
with ThreadPoolExecutor(max_workers=16) as executor:
    futures = [executor.submit(compute_metrics, gt, answer, method) for gt, answer in zip(gts, answers)]

    for future in tqdm(as_completed(futures), total=len(futures)):
        data_source, f1, em, mbe = future.result()
        result[data_source]["f1"].append(f1)
        result[data_source]["em"].append(em)
        result[data_source]["mbe"].append(mbe)

# Average the per-data-source score lists into single numbers.
for data_source in result:
    result[data_source]["f1"] = sum(result[data_source]["f1"]) / len(result[data_source]["f1"])
    result[data_source]["em"] = sum(result[data_source]["em"]) / len(result[data_source]["em"])
    result[data_source]["mbe"] = sum(result[data_source]["mbe"]) / len(result[data_source]["mbe"])

# Write the aggregated scores next to the input result file.
with open(f"./{method}_score.json","w" ,encoding="utf-8") as f:
    answers = json.dump(result,f,indent=4)
283
+
deep_search/DeepResearcher/verl/single_controller/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
import os

# Directory containing this package's __init__.py.
# (The previous single-argument os.path.join() around abspath was a no-op
# and has been removed.)
version_folder = os.path.dirname(os.path.abspath(__file__))

# Note(haibin.lin): single_controller.__version__ is deprecated
with open(os.path.join(version_folder, os.pardir, 'version/version')) as f:
    __version__ = f.read().strip()

from . import base
from .base import *

# Re-export exactly the public API declared by the base subpackage.
__all__ = base.__all__
deep_search/DeepResearcher/verl/single_controller/base/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
# Public API of the single_controller.base package: the Worker process
# abstraction and the WorkerGroup machinery that manages a set of workers.
from .worker import Worker
from .worker_group import WorkerGroup, ClassWithInitArgs, ResourcePool

__all__ = ['Worker', 'WorkerGroup', 'ClassWithInitArgs', 'ResourcePool']
deep_search/DeepResearcher/verl/single_controller/base/decorator.py ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from enum import Enum
16
+ from functools import wraps
17
+ from typing import Dict, List, Tuple
18
+ from types import FunctionType
19
+ from verl.protocol import DataProtoFuture
20
+
21
+ # here we add a magic number of avoid user-defined function already have this attribute
22
+ MAGIC_ATTR = 'attrs_3141562937'
23
+
24
+
25
class Dispatch(Enum):
    """Predefined strategies for dispatching arguments to, and collecting
    outputs from, the workers of a WorkerGroup.

    Each member maps to a (dispatch_fn, collect_fn) pair via
    get_predefined_dispatch_fn below.
    """
    RANK_ZERO = 0            # only rank 0 participates
    ONE_TO_ALL = 1           # broadcast the same args to every worker
    ALL_TO_ALL = 2           # caller supplies per-worker args; passed through unchanged
    MEGATRON_COMPUTE = 3     # route pre-split chunks by Megatron data-parallel rank
    MEGATRON_PP_AS_DP = 4    # treat pipeline-parallel ranks as extra dp ranks
    MEGATRON_PP_ONLY = 5     # broadcast; collect only one output per pp rank
    MEGATRON_COMPUTE_PROTO = 6     # MEGATRON_COMPUTE with DataProto chunk/concat
    MEGATRON_PP_AS_DP_PROTO = 7    # MEGATRON_PP_AS_DP with DataProto chunk/concat
    DP_COMPUTE = 8                 # caller pre-splits one entry per worker
    DP_COMPUTE_PROTO = 9           # DP_COMPUTE with DataProto chunk/concat
    DP_COMPUTE_PROTO_WITH_FUNC = 10  # like DP_COMPUTE_PROTO; first arg is a function broadcast to all workers
    DP_COMPUTE_METRIC = 11         # DataProto dispatch, raw per-worker collect (no concat)
38
+
39
+
40
class Execute(Enum):
    """Which workers actually execute a registered call
    (resolved to a WorkerGroup method name by get_predefined_execute_fn)."""
    ALL = 0        # run on every worker in the group
    RANK_ZERO = 1  # run only on rank 0
43
+
44
+
45
def _split_args_kwargs_data_proto(chunks, *args, **kwargs):
    """Chunk every positional and keyword DataProto(/Future) into `chunks` pieces."""
    from verl.protocol import DataProto, DataProtoFuture

    def _chunk(value):
        # only DataProto-like values know how to split themselves
        assert isinstance(value, (DataProto, DataProtoFuture))
        return value.chunk(chunks=chunks)

    splitted_args = [_chunk(arg) for arg in args]
    splitted_kwargs = {key: _chunk(val) for key, val in kwargs.items()}
    return splitted_args, splitted_kwargs
58
+
59
+
60
def dispatch_one_to_all(worker_group, *args, **kwargs):
    """Replicate every argument world_size times so each worker gets a copy."""
    n = worker_group.world_size
    replicated_args = tuple([value] * n for value in args)
    replicated_kwargs = {name: [value] * n for name, value in kwargs.items()}
    return replicated_args, replicated_kwargs
64
+
65
+
66
def dispatch_all_to_all(worker_group, *args, **kwargs):
    """Pass caller-provided per-worker arguments through unchanged."""
    return args, kwargs
68
+
69
+
70
def collect_all_to_all(worker_group, output):
    """Return the per-worker outputs as-is, one entry per rank."""
    return output
72
+
73
+
74
def dispatch_megatron_compute(worker_group, *args, **kwargs):
    """
    User passes in dp data. The data is dispatched to all tp/pp ranks with the same dp
    """
    from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
    assert isinstance(worker_group,
                      MegatronWorkerGroup), f'worker_group must be MegatronWorkerGroup, Got {type(worker_group)}'

    # pre-compute which dp chunk each global rank should receive
    dp_rank_of = [worker_group.get_megatron_rank_info(rank=i).dp_rank for i in range(worker_group.world_size)]

    def _spread(value):
        # one chunk per dp rank; replicated across tp/pp ranks sharing that dp rank
        assert isinstance(value, (Tuple, List)) and len(value) == worker_group.dp_size
        return [value[dp_rank] for dp_rank in dp_rank_of]

    all_args = tuple(_spread(arg) for arg in args)
    all_kwargs = {key: _spread(val) for key, val in kwargs.items()}
    return all_args, all_kwargs
101
+
102
+
103
def collect_megatron_compute(worker_group, output):
    """
    Only collect the data from the tp=0 and pp=last and every dp ranks
    """
    from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
    assert isinstance(worker_group, MegatronWorkerGroup)
    last_pp = worker_group.get_megatron_global_info().pp_size - 1
    # keep exactly one output per dp rank: the tp=0, last-pp-stage replica
    return [
        output[rank]
        for rank in range(worker_group.world_size)
        if (info := worker_group.get_megatron_rank_info(rank=rank)).tp_rank == 0 and info.pp_rank == last_pp
    ]
116
+
117
+
118
def dispatch_megatron_compute_data_proto(worker_group, *args, **kwargs):
    """
    All the args and kwargs must be DataProto. The batch will be chunked by dp_size and passed to each rank
    """
    from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
    assert isinstance(worker_group, MegatronWorkerGroup)

    # chunk by dp_size first, then fan the chunks out across tp/pp replicas
    chunked_args, chunked_kwargs = _split_args_kwargs_data_proto(worker_group.dp_size, *args, **kwargs)
    return dispatch_megatron_compute(worker_group, *chunked_args, **chunked_kwargs)
127
+
128
+
129
def _concat_data_proto_or_future(output: List):
    """Concatenate a homogeneous list of DataProto (or ray futures) along dim 0."""
    from verl.protocol import DataProto, DataProtoFuture
    import ray

    # mixed element types would concatenate incorrectly — reject them up front
    first = output[0]
    assert all(type(item) == type(first) for item in output)

    if isinstance(first, DataProto):
        return DataProto.concat(output)
    if isinstance(first, ray.ObjectRef):
        return DataProtoFuture.concat(output)
    raise NotImplementedError
145
+
146
+
147
def collect_megatron_compute_data_proto(worker_group, output):
    """
    Each output must be a DataProto. We concat the dim=0 of output
    """
    from verl.protocol import DataProto
    import ray

    collected = collect_megatron_compute(worker_group, output)
    for item in collected:
        assert isinstance(item, (DataProto, ray.ObjectRef)), f"expecting {item} to be DataProto, but got {type(item)}"

    return _concat_data_proto_or_future(collected)
159
+
160
+
161
def dispatch_megatron_pp_as_dp(worker_group, *args, **kwargs):
    """
    treat pp as dp.

    Every positional/keyword value must be a list/tuple of pp_size * dp_size
    chunks; chunk index `dp_rank * pp_size + pp_rank` is routed to every tp
    replica sitting at that (dp, pp) coordinate.
    """
    from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
    assert isinstance(worker_group, MegatronWorkerGroup)

    pp_size = worker_group.pp_size
    dp_size = worker_group.dp_size

    pp_dp_size = pp_size * dp_size

    all_args = []
    for arg in args:
        assert isinstance(arg, (List, Tuple)) and len(arg) == pp_dp_size
        transformed_args = []
        for i in range(worker_group.world_size):
            local_dp_rank = worker_group.get_megatron_rank_info(rank=i).dp_rank
            local_pp_rank = worker_group.get_megatron_rank_info(rank=i).pp_rank
            # compute the rank in arg. Note that the order is dp then pp
            # Also note that the outputs within a pp group will be firstly allgathered, then only the output of pp0 will be collected.
            # For pp=2 dp=4, a batch of data "ABCDEFGH" should be dispatched and collected in below order:
            #    dispatch:       pp_allgther:        collect:
            #   dp 0 1 2 3      dp  0  1  2  3
            # pp +---------+  pp +-------------+
            #  0 | A C E G |   0 | AB CD EF GH |     ABCDEFGH
            #  1 | B D F H |   1 | AB CD EF GH |
            #    +---------+     +-------------+
            arg_rank = local_dp_rank * worker_group.pp_size + local_pp_rank

            transformed_args.append(arg[arg_rank])
        all_args.append(transformed_args)
    all_args = tuple(all_args)

    all_kwargs = {}
    for k, v in kwargs.items():
        assert isinstance(v, (List, Tuple)) and len(v) == pp_dp_size, f'expect len(v)=={pp_dp_size}, got {len(v)}'
        transformed_v = []
        for i in range(worker_group.world_size):
            local_dp_rank = worker_group.get_megatron_rank_info(rank=i).dp_rank
            local_pp_rank = worker_group.get_megatron_rank_info(rank=i).pp_rank
            # compute the rank in arg. Note that the order is dp then pp
            arg_rank = local_dp_rank * worker_group.pp_size + local_pp_rank
            transformed_v.append(v[arg_rank])
        all_kwargs[k] = transformed_v
    return all_args, all_kwargs
207
+
208
+
209
def collect_megatron_pp_as_dp(worker_group, output):
    """
    treat pp as dp. Only collect data on tp=0
    """
    from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
    assert isinstance(worker_group, MegatronWorkerGroup)
    collected = []
    for rank in range(worker_group.world_size):
        info = worker_group.get_megatron_rank_info(rank=rank)
        # keep only the tp=0, pp=0 replica of each dp group
        if info.tp_rank == 0 and info.pp_rank == 0:
            collected.append(output[rank])
    return collected
221
+
222
+
223
def collect_megatron_pp_only(worker_group, output):
    """
    Only collect output of megatron pp. This is useful when examine weight names as they are identical in tp/dp
    """
    from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
    assert isinstance(worker_group, MegatronWorkerGroup)
    # one entry per pp stage: the tp=0, dp=0 replica
    return [
        output[rank]
        for rank in range(worker_group.world_size)
        if (info := worker_group.get_megatron_rank_info(rank=rank)).tp_rank == 0 and info.dp_rank == 0
    ]
235
+
236
+
237
def dispatch_megatron_pp_as_dp_data_proto(worker_group, *args, **kwargs):
    """Chunk DataProto args into dp*pp pieces, then dispatch with pp treated as dp."""
    from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
    assert isinstance(worker_group, MegatronWorkerGroup)

    num_chunks = worker_group.dp_size * worker_group.pp_size
    chunked_args, chunked_kwargs = _split_args_kwargs_data_proto(num_chunks, *args, **kwargs)
    return dispatch_megatron_pp_as_dp(worker_group, *chunked_args, **chunked_kwargs)
244
+
245
+
246
def collect_megatron_pp_as_dp_data_proto(worker_group, output):
    """Collect pp-as-dp outputs (tp=0/pp=0 replicas) and concat into one DataProto.

    Note: the unused local ``from verl.protocol import DataProto`` import that
    the original carried has been removed; the element-type check happens
    inside _concat_data_proto_or_future.
    """
    from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
    assert isinstance(worker_group, MegatronWorkerGroup)

    output = collect_megatron_pp_as_dp(worker_group, output)
    return _concat_data_proto_or_future(output)
253
+
254
+
255
def dispatch_dp_compute(worker_group, *args, **kwargs):
    """Validate caller-pre-split args (one entry per rank) and pass them through."""
    from verl.single_controller.base.worker_group import WorkerGroup
    assert isinstance(worker_group, WorkerGroup)
    expected = worker_group.world_size
    for value in list(args) + list(kwargs.values()):
        assert isinstance(value, (Tuple, List)) and len(value) == expected
    return args, kwargs
263
+
264
+
265
def collect_dp_compute(worker_group, output):
    """Return per-worker outputs unchanged after checking one entry per rank."""
    from verl.single_controller.base.worker_group import WorkerGroup
    assert isinstance(worker_group, WorkerGroup)
    assert len(output) == worker_group.world_size
    return output
270
+
271
+
272
def dispatch_dp_compute_data_proto(worker_group, *args, **kwargs):
    """Chunk each DataProto argument into world_size pieces, one per worker."""
    from verl.single_controller.base.worker_group import WorkerGroup
    assert isinstance(worker_group, WorkerGroup)
    return _split_args_kwargs_data_proto(worker_group.world_size, *args, **kwargs)
277
+
278
+
279
def dispatch_dp_compute_data_proto_with_func(worker_group, *args, **kwargs):
    """Like dispatch_dp_compute_data_proto, but args[0] is a plain function
    that is broadcast (not chunked) to every worker."""
    from verl.single_controller.base.worker_group import WorkerGroup
    assert isinstance(worker_group, WorkerGroup)
    assert type(args[0]) == FunctionType  # NOTE: The first one args is a function!

    func = args[0]
    chunked_args, chunked_kwargs = _split_args_kwargs_data_proto(worker_group.world_size, *args[1:], **kwargs)
    return [[func] * worker_group.world_size] + chunked_args, chunked_kwargs
287
+
288
+
289
def collect_dp_compute_data_proto(worker_group, output):
    """Check every worker output is a DataProto/future, then concat along dim 0."""
    from verl.protocol import DataProto
    import ray

    for item in output:
        assert isinstance(item, (DataProto, ray.ObjectRef)), f"expecting {item} to be DataProto, but got {type(item)}"

    checked = collect_dp_compute(worker_group, output)
    return _concat_data_proto_or_future(checked)
298
+
299
+
300
def get_predefined_dispatch_fn(dispatch_mode):
    """Map a Dispatch enum member to its {'dispatch_fn', 'collect_fn'} pair."""
    mode_to_fns = {
        Dispatch.ONE_TO_ALL: (dispatch_one_to_all, collect_all_to_all),
        Dispatch.ALL_TO_ALL: (dispatch_all_to_all, collect_all_to_all),
        Dispatch.MEGATRON_COMPUTE: (dispatch_megatron_compute, collect_megatron_compute),
        Dispatch.MEGATRON_PP_AS_DP: (dispatch_megatron_pp_as_dp, collect_megatron_pp_as_dp),
        # broadcast in, but keep only one output per pp stage
        Dispatch.MEGATRON_PP_ONLY: (dispatch_one_to_all, collect_megatron_pp_only),
        Dispatch.MEGATRON_COMPUTE_PROTO: (dispatch_megatron_compute_data_proto, collect_megatron_compute_data_proto),
        Dispatch.MEGATRON_PP_AS_DP_PROTO: (dispatch_megatron_pp_as_dp_data_proto, collect_megatron_pp_as_dp_data_proto),
        Dispatch.DP_COMPUTE: (dispatch_dp_compute, collect_dp_compute),
        Dispatch.DP_COMPUTE_PROTO: (dispatch_dp_compute_data_proto, collect_dp_compute_data_proto),
        Dispatch.DP_COMPUTE_PROTO_WITH_FUNC: (dispatch_dp_compute_data_proto_with_func, collect_dp_compute_data_proto),
        # metric collection keeps per-worker outputs instead of concatenating
        Dispatch.DP_COMPUTE_METRIC: (dispatch_dp_compute_data_proto, collect_dp_compute),
    }
    dispatch_fn, collect_fn = mode_to_fns[dispatch_mode]
    return {'dispatch_fn': dispatch_fn, 'collect_fn': collect_fn}
348
+
349
+
350
def get_predefined_execute_fn(execute_mode):
    """
    Note that here we only asks execute_all and execute_rank_zero to be implemented
    Leave the choice of how these two functions handle argument 'blocking' to users
    """
    fn_names = {
        Execute.ALL: 'execute_all',
        Execute.RANK_ZERO: 'execute_rank_zero',
    }
    return {'execute_fn_name': fn_names[execute_mode]}
364
+
365
+
366
def _check_dispatch_mode(dispatch_mode):
    """A dispatch mode is either a Dispatch member or a dict with both fn keys."""
    assert isinstance(dispatch_mode,
                      (Dispatch, Dict)), f'dispatch_mode must be a Dispatch or a Dict. Got {dispatch_mode}'
    if isinstance(dispatch_mode, Dict):
        for key in ('dispatch_fn', 'collect_fn'):
            assert key in dispatch_mode, f'key {key} should be in dispatch_mode if it is a dictionary'
373
+
374
+
375
def _check_execute_mode(execute_mode):
    """Reject anything that is not an Execute enum member."""
    assert isinstance(execute_mode, Execute), f'execute_mode must be a Execute. Got {execute_mode}'
377
+
378
+
379
def _materialize_futures(*args, **kwargs):
    """Resolve any DataProtoFuture arguments into concrete values via .get()."""
    # add more type to materialize
    materialized_args = tuple(
        arg.get() if isinstance(arg, DataProtoFuture) else arg for arg in args)
    materialized_kwargs = {
        key: (val.get() if isinstance(val, DataProtoFuture) else val) for key, val in kwargs.items()
    }
    return materialized_args, materialized_kwargs
392
+
393
+
394
def register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.ALL, blocking=True, materialize_futures=True):
    """Decorator tagging a worker method with dispatch/execute metadata.

    The metadata is stored under MAGIC_ATTR and later read by the WorkerGroup
    machinery to decide how to fan arguments out and gather results. When
    materialize_futures is True, DataProtoFuture args are resolved before the
    wrapped method runs.
    """
    _check_dispatch_mode(dispatch_mode=dispatch_mode)
    _check_execute_mode(execute_mode=execute_mode)

    def decorator(func):

        @wraps(func)
        def inner(*args, **kwargs):
            if materialize_futures:
                args, kwargs = _materialize_futures(*args, **kwargs)
            return func(*args, **kwargs)

        setattr(inner, MAGIC_ATTR, {
            'dispatch_mode': dispatch_mode,
            'execute_mode': execute_mode,
            'blocking': blocking,
        })
        return inner

    return decorator
deep_search/DeepResearcher/verl/single_controller/base/megatron/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
deep_search/DeepResearcher/verl/single_controller/base/megatron/worker.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from verl.single_controller.base.worker import Worker, DistRankInfo, DistGlobalInfo
16
+
17
+
18
class MegatronWorker(Worker):
    """Worker that can report its Megatron tensor/data/pipeline parallel layout."""

    def __init__(self, cuda_visible_devices=None) -> None:
        super().__init__(cuda_visible_devices)

    def get_megatron_global_info(self):
        """Return the tp/dp/pp world sizes of this worker's parallel groups."""
        # import lazily: megatron is only available inside the worker process
        from megatron.core import parallel_state as mpu
        return DistGlobalInfo(tp_size=mpu.get_tensor_model_parallel_world_size(),
                              dp_size=mpu.get_data_parallel_world_size(),
                              pp_size=mpu.get_pipeline_model_parallel_world_size())

    def get_megatron_rank_info(self):
        """Return this worker's tp/dp/pp ranks."""
        from megatron.core import parallel_state as mpu
        return DistRankInfo(tp_rank=mpu.get_tensor_model_parallel_rank(),
                            dp_rank=mpu.get_data_parallel_rank(),
                            pp_rank=mpu.get_pipeline_model_parallel_rank())
deep_search/DeepResearcher/verl/single_controller/base/megatron/worker_group.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Dict
16
+
17
+ from .worker import DistRankInfo, DistGlobalInfo
18
+ from verl.single_controller.base import ResourcePool, WorkerGroup
19
+
20
+
21
class MegatronWorkerGroup(WorkerGroup):
    """WorkerGroup whose workers form a Megatron tp/dp/pp grid.

    Concrete subclasses must populate the per-rank and global parallel info
    by overriding init_megatron().
    """

    def __init__(self, resource_pool: ResourcePool, **kwargs):
        super().__init__(resource_pool=resource_pool, **kwargs)
        # both filled in by init_megatron() in concrete subclasses
        self._megatron_rank_info = None
        self._megatron_global_info: DistGlobalInfo = None

    def init_megatron(self, default_megatron_kwargs: Dict = None):
        raise NotImplementedError(f"MegatronWorkerGroup.init_megatron should be overwritten")

    def get_megatron_rank_info(self, rank: int) -> DistRankInfo:
        """Return the tp/dp/pp ranks of the worker at global `rank`."""
        assert 0 <= rank < self.world_size, f'rank must be from [0, world_size), Got {rank}'
        return self._megatron_rank_info[rank]

    def _require_global_info(self):
        # shared guard for the size properties below
        assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized"
        return self._megatron_global_info

    @property
    def tp_size(self):
        return self._require_global_info().tp_size

    @property
    def dp_size(self):
        return self._require_global_info().dp_size

    @property
    def pp_size(self):
        return self._require_global_info().pp_size

    def get_megatron_global_info(self):
        return self._megatron_global_info
deep_search/DeepResearcher/verl/single_controller/base/register_center/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
deep_search/DeepResearcher/verl/single_controller/base/register_center/ray.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import ray
16
+
17
+
18
@ray.remote
class WorkerGroupRegisterCenter:
    """Tiny named Ray actor that stores rank-zero bootstrap info
    (e.g. MASTER_ADDR/MASTER_PORT) for the rest of the worker group to fetch."""

    def __init__(self, rank_zero_info):
        self.rank_zero_info = rank_zero_info

    def get_rank_zero_info(self):
        """Return the info dict supplied at construction time."""
        return self.rank_zero_info
26
+
27
+
28
def create_worker_group_register_center(name, info):
    """Spawn the register-center actor under a well-known Ray actor name."""
    return WorkerGroupRegisterCenter.options(name=name).remote(info)
deep_search/DeepResearcher/verl/single_controller/base/worker.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ the class for Worker
16
+ """
17
+ import os
18
+ import socket
19
+ from dataclasses import dataclass
20
+ from .decorator import register, Dispatch, Execute
21
+
22
+
23
@dataclass
class DistRankInfo:
    """Megatron parallel ranks (tensor/data/pipeline) of a single worker."""
    tp_rank: int
    dp_rank: int
    pp_rank: int
28
+
29
+
30
@dataclass
class DistGlobalInfo:
    """World sizes of the Megatron tensor/data/pipeline parallel groups."""
    tp_size: int
    dp_size: int
    pp_size: int
35
+
36
+
37
class WorkerHelper:
    """Mixin with host/port discovery helpers used while bootstrapping workers."""

    def _get_node_ip(self):
        """Return this node's IP, preferring MY_HOST_IP/MY_HOST_IPV6 env overrides."""

        def get_node_ip_by_sdk():
            # only the ray backend knows how to look up the node address
            if os.getenv("WG_BACKEND", None) == "ray":
                import ray
                return ray._private.services.get_node_ip_address()
            raise NotImplementedError("WG_BACKEND now just support ray mode.")

        env_ip = os.getenv("MY_HOST_IP", None) or os.getenv("MY_HOST_IPV6", None)
        # NOTE: the sdk lookup runs unconditionally (matching prior behavior),
        # even when the env override wins
        sdk_ip = get_node_ip_by_sdk()
        return env_ip or sdk_ip

    def _get_free_port(self):
        # bind to port 0 and let the OS hand us an unused port
        with socket.socket() as sock:
            sock.bind(('', 0))
            return sock.getsockname()[1]

    def get_availale_master_addr_port(self):
        # NOTE(review): the "availale" typo is part of the public name and is
        # kept for backward compatibility with existing callers
        return self._get_node_ip(), str(self._get_free_port())

    def _get_pid(self):
        return
66
+
67
+
68
class WorkerMeta:
    """Snapshot of the distributed-environment settings for one worker."""

    # env-var names mirrored into the store under "_<lowercase>" keys
    keys = [
        "WORLD_SIZE", "RANK", "LOCAL_WORLD_SIZE", "LOCAL_RANK", "MASTER_ADDR", "MASTER_PORT", "CUDA_VISIBLE_DEVICES"
    ]

    def __init__(self, store) -> None:
        self._store = store

    def to_dict(self):
        """Return {"_<key>": stored-value-or-None} for every known key."""
        result = {}
        for key in WorkerMeta.keys:
            attr = f"_{key.lower()}"
            result[attr] = self._store.get(attr, None)
        return result
78
+
79
+
80
# we assume that in each WorkerGroup, there is a Master Worker
class Worker(WorkerHelper):
    """A (distributed) worker.

    Reads its distributed configuration (rank, world size, master addr/port)
    from environment variables and, on rank 0, publishes the master
    addr/port through a Ray register-center actor so the other ranks can
    discover it.
    """

    def __new__(cls, *args, **kwargs):
        # Bootstrap hook: runs before __init__ so rank 0 can publish its
        # master addr/port early.
        instance = super().__new__(cls)

        # note that here we use int to distinguish
        disable_worker_init = int(os.environ.get('DISABLE_WORKER_INIT', 0))
        if disable_worker_init:
            return instance

        rank = os.environ.get("RANK", None)
        worker_group_prefix = os.environ.get("WG_PREFIX", None)

        # when decorator @ray.remote applies, __new__ will be called while we don't want to apply _configure_before_init
        if None not in [rank, worker_group_prefix] and 'ActorClass(' not in cls.__name__:
            instance._configure_before_init(f"{worker_group_prefix}_register_center", int(rank))

        return instance

    def _configure_before_init(self, register_center_name: str, rank: int):
        """On rank 0: pick a free master addr/port, publish it via the Ray
        register center, and export it into this process's environment.
        Other ranks do nothing here."""
        assert isinstance(rank, int), f"rank must be int, instead of {type(rank)}"

        if rank == 0:
            master_addr, master_port = self.get_availale_master_addr_port()
            rank_zero_info = {
                "MASTER_ADDR": master_addr,
                "MASTER_PORT": master_port,
            }

            if os.getenv("WG_BACKEND", None) == "ray":
                from verl.single_controller.base.register_center.ray import create_worker_group_register_center
                self.register_center = create_worker_group_register_center(name=register_center_name,
                                                                           info=rank_zero_info)

            os.environ.update(rank_zero_info)

    def __init__(self, cuda_visible_devices=None) -> None:
        # construct a meta from envrionment variable. Note that the import must be inside the class because it is executed remotely
        import os
        # WORLD_SIZE/RANK/MASTER_ADDR/MASTER_PORT are required; missing vars
        # raise KeyError here on purpose.
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        self._rank = rank
        self._world_size = world_size

        master_addr = os.environ["MASTER_ADDR"]
        master_port = os.environ["MASTER_PORT"]

        # LOCAL_* default to a single-process-per-node setup
        local_world_size = int(os.getenv("LOCAL_WORLD_SIZE", "1"))
        local_rank = int(os.getenv("LOCAL_RANK", "0"))

        store = {
            '_world_size': world_size,
            '_rank': rank,
            '_local_world_size': local_world_size,
            '_local_rank': local_rank,
            '_master_addr': master_addr,
            '_master_port': master_port
        }
        if cuda_visible_devices is not None:
            store['_cuda_visible_devices'] = cuda_visible_devices

        meta = WorkerMeta(store=store)
        self._configure_with_meta(meta=meta)

    def _configure_with_meta(self, meta: WorkerMeta):
        """
        This function should only be called inside by WorkerGroup.

        Copies the meta's "_<key>" entries onto the instance and mirrors
        non-None values back into os.environ (uppercased).
        """
        assert isinstance(meta, WorkerMeta)
        self.__dict__.update(meta.to_dict())  # this is hacky
        # print(f"__dict__: {self.__dict__}")
        for key in WorkerMeta.keys:
            val = self.__dict__.get(f"_{key.lower()}", None)
            if val is not None:
                # print(f"set {key} to {val}")
                os.environ[key] = str(val)
        # strip IPv6 brackets for the redis store host
        os.environ["REDIS_STORE_SERVER_HOST"] = str(self._master_addr).replace("[", "").replace(
            "]", "") if self._master_addr else ""

    def get_master_addr_port(self):
        """Return the (master_addr, master_port) pair this worker was configured with."""
        return self._master_addr, self._master_port

    def get_cuda_visible_devices(self):
        """Return the CUDA_VISIBLE_DEVICES env value, or "not set" if absent."""
        import os
        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "not set")
        return cuda_visible_devices

    @property
    def world_size(self):
        return self._world_size

    @property
    def rank(self):
        return self._rank

    @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO_WITH_FUNC)
    def execute_with_func_generator(self, func, *args, **kwargs):
        # apply a caller-supplied function to this worker; the function itself
        # is broadcast while the DataProto args are chunked per dp rank
        ret_proto = func(self, *args, **kwargs)
        return ret_proto

    @register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.RANK_ZERO)
    def execute_func_rank_zero(self, func, *args, **kwargs):
        # run an arbitrary function on rank zero only
        result = func(*args, **kwargs)
        return result
deep_search/DeepResearcher/verl/single_controller/base/worker_group.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ the class of WorkerGroup
16
+ """
17
+ import logging
18
+ import threading
19
+ import signal
20
+ import time
21
+ from typing import List, Any, Callable, Dict
22
+
23
+ from .decorator import MAGIC_ATTR, Dispatch, get_predefined_dispatch_fn, get_predefined_execute_fn
24
+
25
+
26
class ResourcePool:
    """The resource pool with meta info such as world_size.

    ``_store`` holds one integer per node: the number of processes to launch
    on that node.
    """

    def __init__(self, process_on_nodes=None, max_collocate_count: int = 10, n_gpus_per_node=8) -> None:
        # avoid a shared mutable default argument
        if process_on_nodes is None:
            process_on_nodes = []
        self._store = process_on_nodes
        self.max_collocate_count = max_collocate_count
        self.n_gpus_per_node = n_gpus_per_node  # this is left for future huawei GPU that contains 16 GPUs per node

    def add_node(self, process_count):
        """Append a node that should run ``process_count`` processes."""
        self._store.append(process_count)

    @property
    def world_size(self):
        """Total number of processes across all nodes."""
        return sum(self._store)

    def __call__(self) -> Any:
        return self._store

    @property
    def store(self):
        return self._store

    def local_world_size_list(self) -> List[int]:
        """Return, for each global rank, the local world size of its node.

        e.g. store [2, 3] -> [2, 2, 3, 3, 3]. Flattened in a single
        comprehension instead of building a nested list first.
        """
        return [local_world_size for local_world_size in self._store for _ in range(local_world_size)]

    def local_rank_list(self) -> List[int]:
        """Return the local rank of each global rank, node by node.

        e.g. store [2, 3] -> [0, 1, 0, 1, 2].
        """
        return [local_rank for local_world_size in self._store for local_rank in range(local_world_size)]
59
+
60
+
61
class ClassWithInitArgs:
    """
    Deferred constructor: records a class together with the positional and
    keyword arguments needed to build it, so instantiation (possibly remote)
    can happen later via ``__call__``.
    """

    def __init__(self, cls, *args, **kwargs) -> None:
        self.cls = cls
        self.args = args
        self.kwargs = kwargs

    def __call__(self) -> Any:
        """Instantiate the stored class with the stored arguments."""
        return self.cls(*self.args, **self.kwargs)
80
+
81
+
82
def check_workers_alive(workers: List, is_alive: Callable, gap_time: float = 1) -> None:
    """Poll ``workers`` forever; raise SIGABRT in the main thread when one dies.

    Intended to run in a background checker thread (see
    ``WorkerGroup.start_worker_aliveness_check``); this function never returns.

    Args:
        workers: worker handles to monitor.
        is_alive: predicate mapping a worker handle to liveness.
        gap_time: seconds to sleep between polling rounds.
    """
    # `time` is imported at module level; the previous function-local import
    # was redundant. The warning text is unchanged (the original concatenated
    # two literals into this same string).
    while True:
        for worker in workers:
            if not is_alive(worker):
                logging.warning(f"worker {worker} is not alive sending signal to main thread")
                signal.raise_signal(signal.SIGABRT)
        time.sleep(gap_time)
90
+
91
+
92
class WorkerGroup:
    """A group of workers.

    Backend-agnostic base class. Holds the worker handles, their names and
    rendezvous info (master addr/port), provides liveness checking, and binds
    `register`-decorated worker methods onto the group via
    ``_bind_worker_method``. Backend subclasses (e.g. a Ray-based group) are
    expected to provide the execute_* entry points.
    """

    def __init__(self, resource_pool: ResourcePool, **kwargs) -> None:
        # a None resource_pool means we attach to already-running (detached) workers
        self._is_init_with_detached_workers = True if resource_pool is None else False

        if resource_pool is not None:
            # handle the case when WorkGroup is attached to an existing one
            self._procecss_dispatch_config = resource_pool()
        else:
            self._procecss_dispatch_config = None

        self._workers = []
        self._worker_names = []

        self._master_addr = None
        self._master_port = None

        # background thread started by start_worker_aliveness_check
        self._checker_thread: threading.Thread = None

    def _is_worker_alive(self, worker):
        """Return whether `worker` is alive; must be overridden by subclasses."""
        raise NotImplementedError(f"WorkerGroup._is_worker_alive called, should be implemented in derived class.")

    def _block_until_all_workers_alive(self) -> None:
        """Busy-wait (1s period) until every worker reports alive."""
        while True:
            all_state = [self._is_worker_alive(worker) for worker in self._workers]
            if False in all_state:
                time.sleep(1)
            else:
                break

    def start_worker_aliveness_check(self, every_n_seconds=1) -> None:
        """Spawn a background thread that signals SIGABRT if any worker dies."""
        # before starting checking worker aliveness, make sure all workers are already alive
        self._block_until_all_workers_alive()

        self._checker_thread = threading.Thread(target=check_workers_alive,
                                                args=(self._workers, self._is_worker_alive, every_n_seconds))
        self._checker_thread.start()

    @property
    def world_size(self):
        # number of workers currently attached to this group
        return len(self._workers)

    # execute_all_async and execute_rank_zero_async should be implemented by RayWorkerGroup, TorchRPCWorkerGroup,
    # MegatronWorkerGroup, XperfWorkerGroup should skip

    def _bind_worker_method(self, user_defined_cls, func_generator):
        """
        Bind the worker method to the WorkerGroup.

        For every attribute of ``user_defined_cls`` that carries MAGIC_ATTR
        (i.e. was decorated with `register`), resolve its dispatch/collect
        functions and the group-level execute function, then attach a proxy
        produced by ``func_generator`` under the same method name on this
        group instance.
        """

        for method_name in dir(user_defined_cls):

            try:
                method = getattr(user_defined_cls, method_name)
                assert callable(method), f"{method_name} in {user_defined_cls} is not callable"
            except Exception as e:
                # if it is a property, it will fail because Class doesn't have instance property
                continue

            if hasattr(method, MAGIC_ATTR):
                # this method is decorated by register
                attribute = getattr(method, MAGIC_ATTR)
                assert isinstance(attribute, Dict), f'attribute must be a dictionary. Got {type(attribute)}'
                assert 'dispatch_mode' in attribute, f'attribute must contain dispatch_mode in its key'

                dispatch_mode = attribute['dispatch_mode']
                execute_mode = attribute['execute_mode']
                blocking = attribute['blocking']

                # get dispatch fn
                if isinstance(dispatch_mode, Dispatch):
                    # get default dispatch fn
                    fn = get_predefined_dispatch_fn(dispatch_mode=dispatch_mode)
                    dispatch_fn = fn['dispatch_fn']
                    collect_fn = fn['collect_fn']
                else:
                    # user-provided dispatch mode: a dict carrying explicit fns
                    assert isinstance(dispatch_mode, dict)
                    assert 'dispatch_fn' in dispatch_mode
                    assert 'collect_fn' in dispatch_mode
                    dispatch_fn = dispatch_mode['dispatch_fn']
                    collect_fn = dispatch_mode['collect_fn']

                # get execute_fn_name
                execute_mode = get_predefined_execute_fn(execute_mode=execute_mode)
                wg_execute_fn_name = execute_mode['execute_fn_name']

                # get execute_fn from string
                try:
                    execute_fn = getattr(self, wg_execute_fn_name)
                    assert callable(execute_fn), 'execute_fn must be callable'
                except Exception as e:
                    print(f'execute_fn {wg_execute_fn_name} is invalid')
                    raise

                # bind a new method to the RayWorkerGroup
                func = func_generator(self,
                                      method_name,
                                      dispatch_fn=dispatch_fn,
                                      collect_fn=collect_fn,
                                      execute_fn=execute_fn,
                                      blocking=blocking)

                try:
                    setattr(self, method_name, func)
                except Exception as e:
                    raise ValueError(f'Fail to set method_name {method_name}')
deep_search/DeepResearcher/verl/single_controller/ray/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .base import RayResourcePool, RayClassWithInitArgs, RayWorkerGroup, create_colocated_worker_cls
deep_search/DeepResearcher/verl/single_controller/ray/base.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import time
16
+ from typing import Dict, List, Any, Tuple
17
+
18
+ import ray
19
+ from ray.util import list_named_actors
20
+ from ray.util.placement_group import placement_group, PlacementGroup
21
+ from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy, NodeAffinitySchedulingStrategy
22
+ from ray.experimental.state.api import get_actor
23
+
24
+ from verl.single_controller.base import WorkerGroup, ResourcePool, ClassWithInitArgs, Worker
25
+
26
+ __all__ = ['Worker']
27
+
28
+
29
def get_random_string(length: int) -> str:
    """Return a random alphanumeric string of ``length`` characters.

    Used for non-secret purposes such as worker-group name prefixes, so the
    ``random`` module is sufficient; do not use this for security tokens.
    """
    import random
    import string
    alphabet = string.ascii_letters + string.digits
    # random.choices samples with replacement in one call (simpler than a
    # per-character generator expression)
    return ''.join(random.choices(alphabet, k=length))
34
+
35
+
36
def func_generator(self, method_name, dispatch_fn, collect_fn, execute_fn, blocking):
    """Build the proxy function that WorkerGroup binds under ``method_name``.

    The returned closure: (1) shards/broadcasts the caller's arguments via
    ``dispatch_fn``, (2) invokes the workers through ``execute_fn``,
    (3) optionally blocks on the ray futures, and (4) merges the per-worker
    outputs via ``collect_fn``.
    """

    def func(*args, **kwargs):
        args, kwargs = dispatch_fn(self, *args, **kwargs)
        output = execute_fn(method_name, *args, **kwargs)
        if blocking:
            # resolve ray object refs into concrete values before collecting
            output = ray.get(output)
        output = collect_fn(self, output)
        return output

    return func
47
+
48
+
49
class RayResourcePool(ResourcePool):
    """ResourcePool backed by ray placement groups (one group per node entry)."""

    def __init__(self,
                 process_on_nodes: List[int] = None,
                 use_gpu: bool = True,
                 name_prefix: str = "",
                 max_colocate_count: int = 5,
                 detached=False) -> None:
        super().__init__(process_on_nodes, max_colocate_count)
        self.use_gpu = use_gpu
        # print(f"in RayProcessDispatchConfiguration: name_prefix = {name_prefix}")
        self.name_prefix = name_prefix
        # lazily-created placement groups, cached after the first call
        self.pgs = None
        self.detached = detached

    def get_placement_groups(self, strategy="STRICT_PACK", name=None):
        """Create (or return cached) one placement group per node entry.

        Each bundle reserves ``max_collocate_count`` CPUs (plus 1 GPU when
        ``use_gpu``), one bundle per process on that node. Blocks until every
        placement group is ready. Detached pools create detached groups so
        they outlive the driver.
        """
        if self.pgs is not None:
            return self.pgs

        pg_name_prefix = name if name else \
            f"{self.name_prefix}verl_group_{'_'.join([str(count) for count in self._store])}:"
        # print(f"pg_name_prefix = {pg_name_prefix}")
        pg_scheme = [[{
            "CPU": self.max_collocate_count,
            "GPU": 1
        } if self.use_gpu else {
            "CPU": self.max_collocate_count
        } for _ in range(process_count)] for process_count in self._store]

        lifetime = 'detached' if self.detached else None

        pgs = [
            placement_group(bundles=bundles, strategy=strategy, name=pg_name_prefix + str(idx), lifetime=lifetime)
            for idx, bundles in enumerate(pg_scheme)
        ]

        # block until ray has reserved all requested resources
        ray.get([pg.ready() for pg in pgs])

        self.pgs = pgs
        return pgs
89
+
90
+
91
def extract_pg_from_exist(resource_pools: Dict[str, RayResourcePool], src_role_names: List[str],
                          resource_pool: RayResourcePool) -> List:
    """Reuse placement groups from existing resource pools for a new pool.

    Collects the placement groups of every pool in ``resource_pools`` whose
    role name is in ``src_role_names``, then greedily matches the largest
    requests in ``resource_pool.store`` to the largest available groups.

    Returns:
        The matched placement groups, reordered to line up with
        ``resource_pool.store``.

    Raises:
        AssertionError: if there are not enough groups, or a group is too
            small for the request it was matched with.
    """
    # fixed: the comprehension variable previously shadowed the
    # `resource_pool` parameter; renamed to `pool` for clarity (no behavior
    # change — Python 3 comprehensions have their own scope).
    src_pgs = [
        pg for role_name, pool in resource_pools.items() for pg in pool.get_placement_groups()
        if role_name in src_role_names
    ]

    # largest groups first, largest requests first -> greedy matching
    sorted_src_pgs = sorted(src_pgs, key=lambda pg: pg.bundle_count, reverse=True)
    sorted_process_on_nodes = sorted([(val, idx) for idx, val in enumerate(resource_pool.store)], reverse=True)

    unsorted_pgs: List[Tuple[int, PlacementGroup]] = []
    searching_idx = 0
    for request_process, original_idx in sorted_process_on_nodes:
        assert searching_idx < len(sorted_src_pgs), f"no enough nodes for request: searching {searching_idx} th node"
        assert request_process <= sorted_src_pgs[searching_idx].bundle_count, \
            f"requesting {request_process} processes, bundle count cannot satisfy"
        unsorted_pgs.append((original_idx, sorted_src_pgs[searching_idx]))
        searching_idx += 1

    # restore the caller's node order
    return [pg for _, pg in sorted(unsorted_pgs)]
112
+
113
+
114
def merge_resource_pool(rp1: RayResourcePool, rp2: RayResourcePool) -> RayResourcePool:
    """Merge two compatible RayResourcePools into a new pool.

    The merged pool concatenates the node stores and reuses the placement
    groups already created by both inputs.

    Bug fix: the merged pool now inherits ``max_colocate_count`` and
    ``detached`` from the inputs (which are asserted equal below); previously
    they silently reset to the constructor defaults.
    """
    assert rp1.use_gpu == rp2.use_gpu, 'Both RayResourcePool must either use_gpu or not'
    assert rp1.max_collocate_count == rp2.max_collocate_count, 'Both RayResourcePool must has the same max_collocate_count'
    assert rp1.n_gpus_per_node == rp2.n_gpus_per_node, 'Both RayResourcePool must has the same n_gpus_per_node'
    assert rp1.detached == rp2.detached, 'Detached ResourcePool cannot be merged with non-detached ResourcePool'

    new_store = rp1.store + rp2.store

    merged = RayResourcePool(new_store, rp1.use_gpu, f"{rp1.name_prefix}_{rp2.name_prefix}",
                             rp1.max_collocate_count, rp1.detached)
    merged.pgs = rp1.get_placement_groups() + rp2.get_placement_groups()

    return merged
126
+
127
+
128
class RayClassWithInitArgs(ClassWithInitArgs):
    """ClassWithInitArgs specialised for ray actors.

    Carries ray actor options (scheduling strategy, runtime_env, name, ...)
    and optional custom resources, and creates the remote actor on call.
    """

    def __init__(self, cls, *args, **kwargs) -> None:
        # self._options = kwargs.pop('options', dict())
        super().__init__(cls, *args, **kwargs)
        self._options = {}
        self._additional_resource = {}

    def set_additional_resource(self, additional_resource):
        """Set extra custom ray resources to request for the actor."""
        self._additional_resource = additional_resource

    def update_options(self, options: Dict):
        """Merge ``options`` into the ray actor options used at creation time."""
        self._options.update(options)

    def __call__(self,
                 placement_group,
                 placement_group_bundle_idx,
                 use_gpu: bool = True,
                 num_gpus=1,
                 sharing_with=None) -> Any:
        """Create the remote actor.

        When ``sharing_with`` is given, the actor is pinned to that actor's
        node and inherits its CUDA_VISIBLE_DEVICES; otherwise it is scheduled
        into the given placement-group bundle.
        """
        if sharing_with is not None:
            target_node_id = ray.get(sharing_with.get_node_id.remote())
            cuda_visible_devices = ray.get(sharing_with.get_cuda_visible_devices.remote())
            options = {"scheduling_strategy": NodeAffinitySchedulingStrategy(node_id=target_node_id, soft=False)}
            return self.cls.options(**options).remote(*self.args,
                                                      cuda_visible_devices=cuda_visible_devices,
                                                      **self.kwargs)

        options = {
            "scheduling_strategy":
                PlacementGroupSchedulingStrategy(placement_group=placement_group,
                                                 placement_group_bundle_index=placement_group_bundle_idx)
        }
        options.update(self._options)

        if use_gpu:
            options["num_gpus"] = num_gpus

        # bug fix: this condition was `> 1`, which silently dropped a single
        # additional-resource entry; `>= 1` applies every provided resource.
        if len(self._additional_resource) >= 1:
            for k, v in self._additional_resource.items():
                options[k] = v

        # print("cls:", self.cls)
        # print("args: ", self.args)
        # print("kwargs: ", self.kwargs)
        return self.cls.options(**options).remote(*self.args, **self.kwargs)
174
+
175
+
176
class RayWorkerGroup(WorkerGroup):
    """WorkerGroup implemented on top of ray actors.

    Creates one actor per process slot of the resource pool (or attaches to
    already-running detached actors by name), wires up rendezvous info via a
    register-center actor, and provides the execute_* entry points used by
    the dispatch machinery.
    """

    def __init__(self,
                 resource_pool: RayResourcePool = None,
                 ray_cls_with_init: RayClassWithInitArgs = None,
                 bin_pack: bool = True,
                 name_prefix: str = None,
                 detached=False,
                 worker_names=None,
                 **kwargs) -> None:
        super().__init__(resource_pool=resource_pool, **kwargs)
        self.ray_cls_with_init = ray_cls_with_init
        self.name_prefix = get_random_string(length=6) if name_prefix is None else name_prefix

        if worker_names is not None:
            # explicit worker names are only meaningful when attaching to
            # detached workers (resource_pool is None)
            assert self._is_init_with_detached_workers
            self._worker_names = worker_names

        if self._is_init_with_detached_workers:
            self._init_with_detached_workers(worker_names=worker_names)
        else:
            self._init_with_resource_pool(resource_pool=resource_pool,
                                          ray_cls_with_init=ray_cls_with_init,
                                          bin_pack=bin_pack,
                                          detached=detached)

        if ray_cls_with_init is not None:
            self._bind_worker_method(self.ray_cls_with_init.cls, func_generator)

    def _is_worker_alive(self, worker: ray.actor.ActorHandle):
        """Query ray's state API for the actor's state; treat unknown as dead."""
        worker_state_dict = get_actor(worker._actor_id.hex())
        return worker_state_dict.get("state", "undefined") == "ALIVE" if worker_state_dict is not None else False

    def _init_with_detached_workers(self, worker_names):
        # look up already-running named actors instead of creating new ones
        workers = [ray.get_actor(name=name) for name in worker_names]
        self._workers = workers
        self._world_size = len(worker_names)

    def _init_with_resource_pool(self, resource_pool, ray_cls_with_init, bin_pack, detached):
        """Create one ray actor per process slot described by the pool.

        Ranks are assigned in store order; rank 0 is created first and its
        MASTER_ADDR/MASTER_PORT (obtained from the register-center actor) are
        passed to every later rank through environment variables.
        """
        use_gpu = resource_pool.use_gpu

        strategy = "PACK"
        if bin_pack:
            strategy = "STRICT_PACK"
        pgs = resource_pool.get_placement_groups(strategy=strategy)
        world_size = resource_pool.world_size
        self._world_size = world_size
        # cia.add_kwarg("_world_size", world_size)
        # fractional GPUs allow max_collocate_count actors to share one GPU bundle
        num_gpus = 1 / resource_pool.max_collocate_count

        rank = -1
        for pg_idx, local_world_size in enumerate(resource_pool.store):
            pg = pgs[pg_idx]
            assert local_world_size <= pg.bundle_count, \
                f"when generating for {self.name_prefix}, for the "
            for local_rank in range(local_world_size):
                rank += 1

                # we pass in environment variable at option so that Worker can use environment variable to set
                env_vars = {
                    'WORLD_SIZE': str(world_size),
                    'RANK': str(rank),
                    'WG_PREFIX': self.name_prefix,
                    'WG_BACKEND': 'ray',
                    'RAY_LOCAL_WORLD_SIZE': str(local_world_size),
                    'RAY_LOCAL_RANK': str(local_rank),
                }
                if rank != 0:
                    # non-zero ranks rendezvous with the address captured from rank 0
                    env_vars['MASTER_ADDR'] = self._master_addr
                    env_vars['MASTER_PORT'] = self._master_port

                import re
                cia_name = type(ray_cls_with_init.cls).__name__
                match = re.search(r"ActorClass\(([^)]+)\)", cia_name)  # ray.remote(Obj) -> "ActorClass(Obj)"
                cia_name = match.group(1) if match else cia_name  # "ActorClass(Obj)" -> "Obj"
                name = f"{self.name_prefix}{cia_name}_{pg_idx}:{local_rank}"  # e.g. Worker_2:5

                ray_cls_with_init.update_options({'runtime_env': {'env_vars': env_vars}, 'name': name})

                if detached:
                    ray_cls_with_init.update_options({'lifetime': 'detached'})

                # create a worker
                worker = ray_cls_with_init(placement_group=pg,
                                           placement_group_bundle_idx=local_rank,
                                           use_gpu=use_gpu,
                                           num_gpus=num_gpus)
                self._workers.append(worker)
                self._worker_names.append(name)

                if rank == 0:
                    # wait (up to ~120s) for rank 0 to publish its rendezvous
                    # info through the named register-center actor
                    register_center_actor = None
                    for _ in range(120):
                        if f"{self.name_prefix}_register_center" not in list_named_actors():
                            time.sleep(1)
                        else:
                            register_center_actor = ray.get_actor(f"{self.name_prefix}_register_center")
                            break
                    assert register_center_actor is not None, f"failed to get register_center_actor: {self.name_prefix}_register_center in {list_named_actors(all_namespaces=True)}"
                    rank_zero_info = ray.get(register_center_actor.get_rank_zero_info.remote())
                    self._master_addr, self._master_port = rank_zero_info['MASTER_ADDR'], rank_zero_info['MASTER_PORT']
                    # print(f"rank_zero_info: {rank_zero_info}")
                    # print(f"master_addr: {self._master_addr}, master_port: {self._master_port}")

    @property
    def worker_names(self):
        return self._worker_names

    @classmethod
    def from_detached(cls, worker_names=None, ray_cls_with_init=None):
        """Build a group attached to existing detached actors by name."""
        worker_group = cls(resource_pool=None,
                           ray_cls_with_init=ray_cls_with_init,
                           name_prefix=None,
                           worker_names=worker_names)
        return worker_group

    def spawn(self, prefix_set):
        """
        spawn to a dictionary of worker groups, each with a subset of method with prefix.

        """

        def _rebind_actor_methods(worker_group, actor_name):
            """
            bind the method with actor_prefix to its original name
            """
            prefix: str = actor_name + '_'
            for method_name in dir(worker_group):
                if method_name.startswith(prefix):
                    # only valid when Python >= 3.9
                    original_method_name = method_name.removeprefix(prefix)
                    method = getattr(worker_group, method_name)
                    setattr(worker_group, original_method_name, method)

        new_worker_group_dict = {}
        for prefix in prefix_set:
            new_worker_group = self.from_detached(worker_names=self._worker_names,
                                                  ray_cls_with_init=self.ray_cls_with_init)

            _rebind_actor_methods(new_worker_group, prefix)
            new_worker_group_dict[prefix] = new_worker_group
        return new_worker_group_dict

    def execute_rank_zero_sync(self, method_name: str, *args, **kwargs):
        """Call ``method_name`` on worker 0 and block for the result."""
        return ray.get(self.execute_rank_zero_async(method_name, *args, **kwargs))

    def execute_rank_zero_async(self, method_name: str, *args, **kwargs):
        """Call ``method_name`` on worker 0; return the ray object ref."""
        remote_call = getattr(self._workers[0], method_name)
        return remote_call.remote(*args, **kwargs)

    def execute_rank_zero(self, method_name: str, *args, **kwargs):
        # alias for the async variant
        return self.execute_rank_zero_async(method_name, *args, **kwargs)

    def execute_all(self, method_name: str, *args, **kwargs):
        # alias for the async variant
        return self.execute_all_async(method_name, *args, **kwargs)

    def execute_all_sync(self, method_name: str, *args, **kwargs):
        """Call ``method_name`` on every worker and block for all results."""
        return ray.get(self.execute_all_async(method_name, *args, **kwargs))

    def execute_all_async(self, method_name: str, *args, **kwargs):
        # Here, we assume that if all arguments in args and kwargs are lists, and their lengths match len(self._workers),
        # we'll distribute each element in these lists to the corresponding worker
        # print(f"execute_all_async: method {method_name}({args}, {kwargs})")
        length = len(self._workers)
        if all(isinstance(arg, list) for arg in args) and all(isinstance(kwarg, list) for kwarg in kwargs.values()):
            if all(len(arg) == length for arg in args) and all(len(kwarg) == length for kwarg in kwargs.values()):
                # print(f"splitting args and kwargs into {length} shards")
                result = []
                for i in range(length):
                    sliced_args = tuple(arg[i] for arg in args)
                    sliced_kwargs = {k: v[i] for k, v in kwargs.items()}
                    remote_call = getattr(self._workers[i], method_name)
                    result.append(remote_call.remote(*sliced_args, **sliced_kwargs))
                return result

        # otherwise broadcast the same arguments to every worker
        return [getattr(worker, method_name).remote(*args, **kwargs) for worker in self._workers]

    @property
    def master_address(self):
        return self._master_addr

    @property
    def master_port(self):
        return self._master_port

    @property
    def workers(self):
        return self._workers

    @property
    def world_size(self):
        return self._world_size
368
+
369
+
370
+ """
371
+ Utilities that enables creating workers inside the same ray.Actor,
372
+ with code written in separate ray.Actors.
373
+ """
374
+
375
+ from unittest.mock import patch
376
+ from verl.single_controller.base.decorator import MAGIC_ATTR
377
+ import os
378
+
379
+
380
def _bind_workers_method_to_parent(cls, key, user_defined_cls):
    """
    Binds the methods of each worker to the WorkerDict.
    Note that we only bind public methods that are decorated by register.

    Each bound method is named ``<key>_<method_name>`` and forwards the call
    to the inner worker stored under ``self.worker_dict[key]``.
    """
    for method_name in dir(user_defined_cls):
        try:
            method = getattr(user_defined_cls, method_name)
            assert callable(method), f"{method_name} in {user_defined_cls} is not callable"
        except Exception as e:
            # if it is a property, it will fail because Class doesn't have instance property
            continue

        if hasattr(method, MAGIC_ATTR):

            def generate_function(name):
                # factory binds `name` per-method, avoiding the late-binding
                # closure pitfall inside the surrounding loop

                def func(self, *args, **kwargs):
                    # dispatch to the actual worker
                    return getattr(self.worker_dict[key], name)(*args, **kwargs)

                return func

            func = generate_function(method_name)
            # pass MAGIC_ATTR for outer worker group
            setattr(func, MAGIC_ATTR, getattr(method, MAGIC_ATTR))
            try:
                method_name_with_prefix = key + '_' + method_name
                setattr(cls, method_name_with_prefix, func)
                # print(f'Binding {method_name_with_prefix}')
            except Exception as e:
                raise ValueError(f'Fail to set method_name {method_name}')
412
+
413
+
414
+ def _unwrap_ray_remote(cls):
415
+ if hasattr(cls, '__ray_actor_class__'):
416
+ cls = cls.__ray_actor_class__
417
+ return cls
418
+
419
+
420
def create_colocated_worker_cls(class_dict: dict[str, RayClassWithInitArgs]):
    """
    This function should return a class instance that delegates the calls to every
    cls in cls_dict.

    All provided classes must share the same Worker base class: the colocated
    WorkerDict inherits from that base and instantiates one inner worker per
    key inside a single ray actor (i.e. a single process).
    """
    cls_dict = {}
    init_args_dict = {}
    worker_cls = None
    for key, cls in class_dict.items():
        if worker_cls is None:  # fixed: identity check instead of `== None`
            worker_cls = cls.cls.__ray_actor_class__.__base__
        else:
            assert worker_cls == cls.cls.__ray_actor_class__.__base__, \
                'the worker class should be the same when share the same process'
        cls_dict[key] = cls.cls
        init_args_dict[key] = {'args': cls.args, 'kwargs': cls.kwargs}

    assert cls_dict.keys() == init_args_dict.keys()

    # TODO: create a class with customizable name
    class WorkerDict(worker_cls):

        def __init__(self):
            super().__init__()
            self.worker_dict = {}
            for key, user_defined_cls in cls_dict.items():
                user_defined_cls = _unwrap_ray_remote(user_defined_cls)
                # directly instantiate the class without remote
                # NOTE(review): DISABLE_WORKER_INIT presumably tells the Worker
                # base __init__ to skip its own setup — confirm against Worker
                with patch.dict(os.environ, {'DISABLE_WORKER_INIT': '1'}):
                    self.worker_dict[key] = user_defined_cls(*init_args_dict[key].get('args', ()),
                                                             **init_args_dict[key].get('kwargs', {}))

    # now monkey-patch the methods from inner class to WorkerDict
    for key, user_defined_cls in cls_dict.items():
        user_defined_cls = _unwrap_ray_remote(user_defined_cls)
        _bind_workers_method_to_parent(WorkerDict, key, user_defined_cls)

    remote_cls = ray.remote(WorkerDict)
    remote_cls = RayClassWithInitArgs(cls=remote_cls)
    return remote_cls
deep_search/DeepResearcher/verl/single_controller/ray/megatron.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Dict, Optional
16
+
17
+ import ray
18
+
19
+ from .base import RayWorkerGroup, RayResourcePool, RayClassWithInitArgs
20
+ from verl.single_controller.base.megatron.worker import DistRankInfo, DistGlobalInfo
21
+ from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
22
+
23
+
24
# NOTE(sgm): for open-source megatron-core
class NVMegatronRayWorkerGroup(RayWorkerGroup, MegatronWorkerGroup):
    """
    MegatronWorkerGroup will query each worker of its megatron rank info and store it inside the WorkerGroup
    so that the dispatcher can use it to dispatch data.
    """

    def __init__(self, resource_pool: RayResourcePool, ray_cls_with_init: RayClassWithInitArgs, **kwargs):
        super().__init__(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, **kwargs)
        # per-worker megatron rank info, queried synchronously from every worker
        self._megatron_rank_info: DistRankInfo = self.execute_all_sync(method_name='get_megatron_rank_info')
        # global parallelism info only needs to be fetched once, from rank 0
        self._megatron_global_info: DistGlobalInfo = ray.get(
            self.execute_rank_zero_async(method_name='get_megatron_global_info'))
36
+
37
+
38
class MegatronRayWorkerGroup(RayWorkerGroup, MegatronWorkerGroup):
    """
    MegatronWorkerGroup will query each worker of its megatron rank info and store it inside the WorkerGroup
    so that the dispatcher can use it to dispatch data.

    Unlike NVMegatronRayWorkerGroup, this variant also drives megatron
    initialization on the workers via ``init_megatron``.
    """

    def __init__(self,
                 resource_pool: RayResourcePool,
                 ray_cls_with_init: RayClassWithInitArgs,
                 default_megatron_kwargs: Dict = None,
                 **kwargs):
        super().__init__(resource_pool=resource_pool,
                         ray_cls_with_init=ray_cls_with_init,
                         default_megatron_kwargs=default_megatron_kwargs,
                         **kwargs)
        self.init_megatron(default_megatron_kwargs=default_megatron_kwargs)
        # per-worker megatron rank info, queried synchronously from every worker
        self._megatron_rank_info: DistRankInfo = self.execute_all_sync(method_name='get_megatron_rank_info')
        # global parallelism info only needs to be fetched once, from rank 0
        self._megatron_global_info: DistGlobalInfo = ray.get(
            self.execute_rank_zero_async(method_name='get_megatron_global_info'))

    def init_megatron(self, default_megatron_kwargs: Optional[Dict] = None):
        # after super, we will call init of each worker
        if not self._is_init_with_detached_workers:
            # only init_megatron if the WorkerGroup is created from scratch
            self.execute_all_sync(method_name='init_megatron', default_megatron_kwargs=default_megatron_kwargs)
deep_search/DeepResearcher/verl/utils/checkpoint/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
deep_search/DeepResearcher/verl/utils/checkpoint/checkpoint_manager.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ import shutil
16
+ from filelock import FileLock
17
+ import tempfile
18
+ from typing import Union
19
+ import torch
20
+ import torch.distributed
21
+ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
22
+ from transformers import PreTrainedTokenizer, ProcessorMixin
23
+ import numpy as np
24
+ import random
25
+
26
+
27
class BaseCheckpointManager:
    """
    A checkpoint manager that saves and loads
    - model
    - optimizer
    - lr_scheduler
    - extra_states
    in a SPMD way.

    We save
    - sharded model states and optimizer states
    - full lr_scheduler states
    - huggingface tokenizer and config for ckpt merge
    """

    def __init__(self, model: FSDP, optimizer: torch.optim.Optimizer,
                 lr_scheduler: torch.optim.lr_scheduler.LRScheduler, processing_class: Union[PreTrainedTokenizer,
                                                                                             ProcessorMixin]):
        """Record the training objects to checkpoint and this rank's identity.

        Requires torch.distributed to be initialized and `model` to be FSDP-wrapped.
        """
        # Track the last save so it can be removed on the next save if requested.
        self.previous_global_step = None
        self.previous_save_local_path = None

        self.model = model
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.processing_class = processing_class

        assert isinstance(self.model, FSDP)
        self.rank = torch.distributed.get_rank()
        self.world_size = torch.distributed.get_world_size()

    def load_checkpoint(self, *args, **kwargs):
        """Restore model/optimizer/scheduler state. Implemented by subclasses."""
        raise NotImplementedError

    def save_checkpoint(self, *args, **kwargs):
        """Persist model/optimizer/scheduler state. Implemented by subclasses."""
        raise NotImplementedError

    def remove_previous_save_local_path(self):
        """Delete the previously saved local checkpoint directory, if any."""
        if not self.previous_save_local_path:
            return

        abs_path = os.path.abspath(self.previous_save_local_path)
        print(f'Checkpoint manager remove previous save local path: {abs_path}')
        if not os.path.exists(abs_path):
            return

        # remove previous local_path
        shutil.rmtree(abs_path, ignore_errors=True)

    @staticmethod
    def local_mkdir(path):
        """Create `path` (made absolute relative to CWD) under a cross-process
        file lock, so concurrent ranks on one node do not race on creation.

        Returns the absolute path.
        """
        if not os.path.isabs(path):
            working_dir = os.getcwd()
            path = os.path.join(working_dir, path)

        # Use a short, *deterministic* digest of the path as the lock file name.
        # BUGFIX: the builtin hash() is salted per process (PYTHONHASHSEED), so
        # different processes computed different lock names and the lock never
        # actually serialized anything. hashlib gives every process the same name.
        import hashlib
        digest = hashlib.sha256(path.encode()).hexdigest()[:8]
        lock_filename = f"ckpt_{digest}.lock"
        lock_path = os.path.join(tempfile.gettempdir(), lock_filename)

        try:
            with FileLock(lock_path, timeout=60):  # Add timeout
                # make a new dir
                os.makedirs(path, exist_ok=True)
        except Exception as e:
            print(f"Warning: Failed to acquire lock for {path}: {e}")
            # Even if the lock is not acquired, try to create the directory
            os.makedirs(path, exist_ok=True)

        return path

    @staticmethod
    def get_rng_state():
        """Capture all RNG states (torch CPU/CUDA, numpy, python random).

        NOTE(review): torch.cuda.get_rng_state() requires a CUDA device — this
        is CUDA-only by design; confirm if CPU-only checkpointing is ever needed.
        """
        rng_state = {
            'cpu': torch.get_rng_state(),
            'cuda': torch.cuda.get_rng_state(),
            'numpy': np.random.get_state(),
            'random': random.getstate(),
        }
        return rng_state

    @staticmethod
    def load_rng_state(rng_state):
        """Restore the RNG states captured by :meth:`get_rng_state`."""
        torch.set_rng_state(rng_state['cpu'])
        torch.cuda.set_rng_state(rng_state['cuda'])
        np.random.set_state(rng_state['numpy'])
        random.setstate(rng_state['random'])
112
+
113
+
114
def find_latest_ckpt_path(path, directory_format="global_step_{}"):
    """Return the newest checkpoint directory under `path`, or None.

    Reads the tracker file (see :func:`get_checkpoint_tracker_filename`) for the
    latest iteration number, then checks that the corresponding
    `directory_format.format(iteration)` directory exists.

    Args:
        path: checkpoint root directory (None is tolerated and yields None).
        directory_format: per-step directory name template with one `{}` slot.

    Returns:
        Absolute/relative path of the checkpoint directory, or None if the
        tracker file or the directory is missing.
    """
    if path is None:
        return None

    tracker_file = get_checkpoint_tracker_filename(path)
    if not os.path.exists(tracker_file):
        # BUGFIX: these messages used logging-style '%s' placeholders with
        # print(), which printed the literal '%s' and the path as a second arg.
        print(f"Checkpoint tracker file does not exist: {tracker_file}")
        return None

    with open(tracker_file, "rb") as f:
        iteration = int(f.read().decode())
    ckpt_path = os.path.join(path, directory_format.format(iteration))
    if not os.path.exists(ckpt_path):
        print(f"Checkpoint does not exist: {ckpt_path}")
        return None

    print(f"Found checkpoint: {ckpt_path}")
    return ckpt_path


def get_checkpoint_tracker_filename(root_path: str):
    """
    Tracker file records the latest checkpoint during training to restart from.
    """
    return os.path.join(root_path, "latest_checkpointed_iteration.txt")
deep_search/DeepResearcher/verl/utils/checkpoint/fsdp_checkpoint_manager.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import ray
16
+ import os
17
+
18
+ import warnings
19
+ from typing import Union
20
+ import torch
21
+ import torch.distributed
22
+ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
23
+ from torch.distributed.fsdp import ShardedStateDictConfig, ShardedOptimStateDictConfig
24
+
25
+ from verl.utils.fs import copy_to_local, is_non_local
26
+
27
+ from transformers import PreTrainedTokenizer, ProcessorMixin
28
+
29
+ from .checkpoint_manager import BaseCheckpointManager
30
+
31
+
32
class FSDPCheckpointManager(BaseCheckpointManager):
    """
    A checkpoint manager that saves and loads
    - model
    - optimizer
    - lr_scheduler
    - extra_states
    in a SPMD way.

    We save
    - sharded model states and optimizer states
    - full lr_scheduler states
    - huggingface tokenizer/processor and config for ckpt merge
    """

    def __init__(self,
                 model: FSDP,
                 optimizer: torch.optim.Optimizer,
                 lr_scheduler: torch.optim.lr_scheduler.LRScheduler,
                 processing_class: Union[PreTrainedTokenizer, ProcessorMixin] = None,
                 **kwargs):

        # Backward compatibility: accept the deprecated `tokenizer` kwarg.
        if processing_class is None:
            assert "tokenizer" in kwargs, "tokenizer or processor must be provided"
            warnings.warn("`tokenizer` is deprecated. use `processing_class` instead.", DeprecationWarning)
            processing_class = kwargs.pop("tokenizer")

        super().__init__(model, optimizer, lr_scheduler, processing_class)

    def load_checkpoint(self, path=None, del_local_after_load=False, *args, **kwargs):
        """Load this rank's sharded model/optim/extra state from `path`.

        Each rank fetches its own `*_world_size_{W}_rank_{R}.pt` shards, so the
        checkpoint must have been written with the same world size.

        Args:
            path: checkpoint directory (local or remote); None is a no-op.
            del_local_after_load: best-effort removal of downloaded copies
                after loading (only when the source was non-local).
        """
        if path is None:
            return

        # every rank download its own checkpoint
        remote_model_path = os.path.join(path, f'model_world_size_{self.world_size}_rank_{self.rank}.pt')
        remote_optim_path = os.path.join(path, f'optim_world_size_{self.world_size}_rank_{self.rank}.pt')
        remote_extra_state_path = os.path.join(path, f'extra_state_world_size_{self.world_size}_rank_{self.rank}.pt')
        print(
            f'[rank-{self.rank}]: Loading from {remote_model_path} and {remote_optim_path} and {remote_extra_state_path}'
        )
        local_model_path = copy_to_local(remote_model_path)
        local_optim_path = copy_to_local(remote_optim_path)
        local_extra_state_path = copy_to_local(remote_extra_state_path)

        # NOTE(review): torch.load without map_location/weights_only — confirm
        # shards were saved from the same device layout and a trusted source.
        model_state_dict = torch.load(local_model_path)
        optimizer_state_dict = torch.load(local_optim_path)
        extra_state_dict = torch.load(local_extra_state_path)

        if del_local_after_load:
            try:
                os.remove(local_model_path) if is_non_local(local_model_path) else None
                os.remove(local_optim_path) if is_non_local(local_optim_path) else None
                os.remove(local_extra_state_path) if is_non_local(local_extra_state_path) else None
            except Exception as e:
                # Best-effort cleanup: a failed delete must not abort the resume.
                print(
                    f'[rank-{self.rank}]: remove local resume ckpt file after loading failed, exception {e} will be ignored'
                )

        lr_scheduler_state_dict = extra_state_dict['lr_scheduler']

        state_dict_cfg = ShardedStateDictConfig(offload_to_cpu=True)
        optim_cfg = ShardedOptimStateDictConfig(offload_to_cpu=True)
        with FSDP.state_dict_type(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg):
            self.model.load_state_dict(model_state_dict)
            if self.optimizer is not None:
                self.optimizer.load_state_dict(optimizer_state_dict)
        # recover random state
        if 'rng' in extra_state_dict:
            # 'rng' may not exist for backward compatibility
            self.load_rng_state(extra_state_dict['rng'])

        if self.lr_scheduler is not None:
            self.lr_scheduler.load_state_dict(lr_scheduler_state_dict)

    def save_checkpoint(self, local_path: str, global_step: int, remove_previous_ckpt=False, *args, **kwargs):
        """Save sharded model/optim shards per rank, plus full scheduler and
        RNG state; rank 0 additionally dumps the HF config and tokenizer.

        Args:
            local_path: target directory (created if needed).
            global_step: training step recorded for bookkeeping.
            remove_previous_ckpt: delete the previous save before writing.
        """
        # record the previous global step
        self.previous_global_step = global_step

        # remove previous local_path
        # TODO: shall we remove previous ckpt every save?
        if remove_previous_ckpt:
            self.remove_previous_save_local_path()
        local_path = self.local_mkdir(local_path)
        torch.distributed.barrier()

        # every rank will save its own model and optim shard
        state_dict_cfg = ShardedStateDictConfig(offload_to_cpu=True)
        optim_cfg = ShardedOptimStateDictConfig(offload_to_cpu=True)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            with FSDP.state_dict_type(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg):
                model_state_dict = self.model.state_dict()
                if self.optimizer is not None:
                    optimizer_state_dict = self.optimizer.state_dict()
                else:
                    optimizer_state_dict = None
                if self.lr_scheduler is not None:
                    lr_scheduler_state_dict = self.lr_scheduler.state_dict()
                else:
                    lr_scheduler_state_dict = None

                extra_state_dict = {
                    'lr_scheduler': lr_scheduler_state_dict,
                    'rng': self.get_rng_state(),
                }
                model_path = os.path.join(local_path, f'model_world_size_{self.world_size}_rank_{self.rank}.pt')
                optim_path = os.path.join(local_path, f'optim_world_size_{self.world_size}_rank_{self.rank}.pt')
                extra_path = os.path.join(local_path, f'extra_state_world_size_{self.world_size}_rank_{self.rank}.pt')

                print(f'[rank-{self.rank}]: Saving model to {os.path.abspath(model_path)}')
                # BUGFIX: this line previously logged model_path again ("Saving
                # checkpoint to ...") instead of the optimizer shard path.
                print(f'[rank-{self.rank}]: Saving optim to {os.path.abspath(optim_path)}')
                print(f'[rank-{self.rank}]: Saving extra_state to {os.path.abspath(extra_path)}')
                torch.save(model_state_dict, model_path)
                torch.save(optimizer_state_dict, optim_path)  # TODO: address optimizer is None
                torch.save(extra_state_dict, extra_path)

        # wait for everyone to dump to local
        torch.distributed.barrier()

        if self.rank == 0:
            hf_local_path = os.path.join(local_path, 'huggingface')
            os.makedirs(hf_local_path, exist_ok=True)
            self.model._fsdp_wrapped_module.config.save_pretrained(hf_local_path)
            self.processing_class.save_pretrained(hf_local_path)

        torch.distributed.barrier()

        self.previous_save_local_path = local_path
deep_search/DeepResearcher/verl/utils/debug/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .performance import log_gpu_memory_usage
deep_search/DeepResearcher/verl/utils/debug/performance.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import torch
16
+ import torch.distributed as dist
17
+ import logging
18
+
19
+
20
def log_gpu_memory_usage(head: str, logger: logging.Logger = None, level=logging.DEBUG, rank: int = 0):
    """Report current CUDA memory usage (allocated/reserved, in GiB).

    Emits only on the given rank; `rank=None` means every rank, and the
    message is always emitted when torch.distributed is not initialized.
    Falls back to print() when no logger is supplied.
    """
    should_report = (not dist.is_initialized()) or (rank is None) or (dist.get_rank() == rank)
    if not should_report:
        return

    gib = 1024**3
    memory_allocated = torch.cuda.memory_allocated() / gib
    memory_reserved = torch.cuda.memory_reserved() / gib

    message = f'{head}, memory allocated (GB): {memory_allocated}, memory reserved (GB): {memory_reserved}'

    if logger is not None:
        logger.log(msg=message, level=level)
    else:
        print(message)
deep_search/DeepResearcher/verl/utils/debug/trajectory_tracker.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Trajectory tracker can be inserted into code to save the intermediate results.
16
+ The results will be dump to hdfs for offline comparison.
17
+ Each process will have a client that first move all the tensors to CPU
18
+ """
19
+
20
+ from verl.utils.hdfs_io import makedirs, copy
21
+ import torch
22
+ import os
23
+ import ray
24
+ import io
25
+ import tempfile
26
+
27
+ from collections import deque
28
+
29
# Ray-task wrapper around the plain hdfs `copy` helper so copies can be run
# remotely. NOTE(review): appears unused in this module — `save_to_hdfs`
# calls `copy` directly; confirm external users before removing.
remote_copy = ray.remote(copy)
30
+
31
+
32
@ray.remote
def save_to_hdfs(data: io.BytesIO, name, hdfs_dir, verbose):
    """Write `data` to a temp file named `<name>.pth`, then upload it to `hdfs_dir`.

    Upload failures are printed and swallowed: persistence is best-effort.
    """
    with tempfile.TemporaryDirectory() as staging_dir:
        staging_path = os.path.join(staging_dir, name + '.pth')
        with open(staging_path, 'wb') as fh:
            fh.write(data.getbuffer())
        # upload to hdfs

        if verbose:
            print(f'Saving {staging_path} to {hdfs_dir}')
        try:
            copy(staging_path, hdfs_dir)
        except Exception as e:
            print(e)
48
+
49
@ray.remote
class TrajectoryTracker():
    """Ray actor that fans trajectory dumps out to HDFS asynchronously.

    Each `dump` schedules a `save_to_hdfs` task; `wait_for_hdfs` drains the
    queue of pending uploads.
    """

    def __init__(self, hdfs_dir, verbose) -> None:
        self.hdfs_dir = hdfs_dir
        makedirs(hdfs_dir)
        self.verbose = verbose

        # FIFO of in-flight upload futures.
        self.handle = deque()

    def dump(self, data: io.BytesIO, name):
        """Schedule an asynchronous upload of `data` as `<name>.pth`."""
        pending = save_to_hdfs.remote(data, name, self.hdfs_dir, self.verbose)
        self.handle.append(pending)

    def wait_for_hdfs(self):
        """Block until every scheduled upload has finished."""
        while self.handle:
            ray.get(self.handle.popleft())
67
+
68
+
69
def dump_data(data, name):
    """Serialize `data` with torch and hand it to the global tracker under `name`.

    No-op unless the env var VERL_ENABLE_TRACKER is set to '1'.
    """
    if os.getenv('VERL_ENABLE_TRACKER', '0') != '1':
        return

    serialized = io.BytesIO()
    torch.save(data, serialized)
    tracker = get_trajectory_tracker()
    ray.get(tracker.dump.remote(serialized, name))
77
+
78
+
79
def get_trajectory_tracker():
    """Fetch (or lazily create) the detached, globally named tracker actor.

    Requires VERL_TRACKER_HDFS_DIR to be set; VERL_TRACKER_VERBOSE='1'
    enables verbose upload logging.
    """
    hdfs_dir = os.getenv('VERL_TRACKER_HDFS_DIR', default=None)
    assert hdfs_dir is not None
    verbose = os.getenv('VERL_TRACKER_VERBOSE', default='0') == '1'
    # get_if_exists + a fixed name makes this a process-wide singleton;
    # "detached" keeps it alive independently of the creating job.
    return TrajectoryTracker.options(name="global_tracker", get_if_exists=True,
                                     lifetime="detached").remote(hdfs_dir, verbose)
86
+
87
+
88
if __name__ == '__main__':
    # Manual smoke test: enable the tracker and point it at a test HDFS dir.
    # testing
    os.environ['VERL_ENABLE_TRACKER'] = '1'
    os.environ['VERL_TRACKER_HDFS_DIR'] = '~/debug/test'

    # Each task dumps one random tensor through the global tracker.
    # NOTE(review): the parameter name `iter` shadows the builtin.
    @ray.remote
    def process(iter):
        data = {'obs': torch.randn(10, 20)}
        dump_data(data, f'process_{iter}_obs')

    ray.init()

    output_lst = []

    # Launch 10 concurrent dump tasks.
    for i in range(10):
        output_lst.append(process.remote(i))

    # Wait for all tasks to have *scheduled* their uploads.
    out = ray.get(output_lst)

    # Then drain the tracker's pending-upload queue.
    tracker = get_trajectory_tracker()
    ray.get(tracker.wait_for_hdfs.remote())
deep_search/DeepResearcher/verl/utils/reward_score/prime_code/__init__.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 PRIME team and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .utils import check_correctness as apps_check_correctness
16
+ import json
17
+ import re
18
+ import traceback
19
+
20
+
21
def compute_score(completion, test_cases, continuous=False):
    """Score a code completion against APPS-style test cases.

    Args:
        completion: model output; if it contains ```python fences, the last
            fenced block is extracted as the solution (pure code passes through).
        test_cases: dict (or JSON string) with "inputs" and "outputs" lists.
        continuous: if False, success is a bool — True only when the complete
            check passes everything. If True and the complete check fails,
            success is the fractional pass rate over at most the first 10
            per-sample tests.

    Returns:
        (success, metadata): metadata is a dict on the all-pass fast path,
        a list of per-test dicts in continuous mode, or None on failure.
    """
    # try to get code solution from completion. if the completion is pure code, this will not take effect.
    solution = completion.split('```python')[-1].split('```')[0]

    # BUGFIX: the original left `success`/`metadata_list`/`res_list` undefined on
    # several paths (e.g. continuous=False with failing tests) and relied on the
    # resulting NameError being caught below to yield (False, None) — printing a
    # spurious traceback each time. Initialize them explicitly instead.
    success = False
    metadata_list = None
    try:
        try:
            if not isinstance(test_cases, dict):
                test_cases = json.loads(test_cases)
        except Exception as e:
            print(f"Error:{e}")

        # Complete check on all in-out pairs first. If there is no failure, per-sample test can be skipped.
        try:
            res, metadata = apps_check_correctness(in_outs=test_cases, generation=solution, timeout=5, debug=False)
            metadata = dict(enumerate(metadata))[0]
            # `res` mixes booleans with error codes (-1/-2), so compare to True.
            if all(r == True for r in res):
                return True, metadata
        except Exception:
            pass

        if continuous:
            # per sample test: if continuous score is needed, test first 10 samples regardless of failures
            # do not test all samples cuz some problems have enormous test cases
            inputs = test_cases["inputs"]
            outputs = test_cases["outputs"]
            metadata_list = []
            res_list = []
            for case_idx in range(min(len(inputs), 10)):
                test_case = {"inputs": [inputs[case_idx]], "outputs": [outputs[case_idx]]}
                res, metadata = apps_check_correctness(in_outs=test_case, generation=solution, timeout=5, debug=False)
                try:
                    metadata = dict(enumerate(metadata))[0]  # metadata can be empty occasionally
                except Exception:
                    metadata = {}
                metadata["test_case"] = {
                    "input": str(test_case["inputs"][0]),
                    "output": str(test_case["outputs"][0]),
                    "res": str(res),
                }
                metadata_list.append(metadata)
                res_list.extend(res)
            res_count = len(res_list) if len(res_list) > 0 else 1
            success = sum(r == True for r in res_list) / res_count
    except Exception:
        traceback.print_exc(10)
        success = False
        metadata_list = None
    return success, metadata_list