Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- OpenManus/.gitattributes +30 -0
- OpenManus/.gitignore +180 -0
- OpenManus/.pre-commit-config.yaml +39 -0
- OpenManus/LICENSE +21 -0
- OpenManus/README.md +117 -0
- OpenManus/README_tw.md +117 -0
- OpenManus/README_zh.md +117 -0
- OpenManus/main.py +26 -0
- OpenManus/requirements.txt +21 -0
- OpenManus/run_flow.py +33 -0
- OpenManus/setup.py +49 -0
- deep_search/3-12-1200qa_doc_by_itself_qwen.log +15 -0
- deep_search/3-12-1200qa_doc_by_itself_qwq.log +16 -0
- deep_search/3-15-correct_solutions_sft_add_prompt_doc_by_itself_qwq.log +80 -0
- deep_search/3-18-2kqa_doc_by_itself_DeepSeek-R1-Distill-Qwen-32B.log +62 -0
- deep_search/3-18-2kqa_doc_by_itself_QwQ-32B.log +20 -0
- deep_search/3-18-2kqa_doc_by_itself_Qwen2.5-32B-Instruct.log +20 -0
- deep_search/3-20-new_instruction_2k_sft_remove_exp_doc_by_itself_Qwen2.5-7B-Instruct.log +89 -0
- deep_search/3-22-merged_2462_nq_237_total_2699_doc_by_itself_QwQ-32B.log +107 -0
- deep_search/3-25-strict_selected_1526_sft_doc_by_itself_QwQ-32B.log +67 -0
- deep_search/3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-32B-Instruct.log +122 -0
- deep_search/3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-32B-Instruct_eval.log +21 -0
- deep_search/3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-7B-Instruct.log +119 -0
- deep_search/3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-7B-Instruct_eval.log +19 -0
- deep_search/3-26_selected_data_1174_sft_doc_by_itself_QwQ-32B.log +70 -0
- deep_search/3-26_strict_selected_1526_sft_format_ans_doc_by_itself_QwQ-32B.log +99 -0
- deep_search/3-26_strict_selected_1526_sft_format_ans_doc_by_itself_QwQ-32B_1.log +22 -0
- deep_search/3-27_merged_syn_long_359_sft_1533_doc_by_itself_QwQ-32B.log +74 -0
- deep_search/3-27_merged_syn_short_398_sft_1572_doc_by_itself_QwQ-32B_1.log +29 -0
- deep_search/4-2_cleaned_871_359_total_1065_doc_by_itself_QwQ-32B.log +75 -0
- deep_search/4-3_871_change_hotpotqa_train_871_doc_by_itself_QwQ-32B_1.log +28 -0
- deep_search/4-4_871_doc_by_itself_qwen7b_inst.log +26 -0
- deep_search/ds_zero3.json +31 -0
- deep_search/ds_zero3_offload.json +39 -0
- deep_search/math_eval.sh +32 -0
- deep_search/mix.sh +265 -0
- deep_search/mix_1.sh +317 -0
- deep_search/mix_re.sh +175 -0
- deep_search/run.sh +25 -0
- deep_search/search_o1/scripts/evaluate_for_rag_rl.py +629 -0
- deep_search/search_o1/scripts/llm_as_judge.py +496 -0
- deep_search/search_o1/scripts/prompts.py +667 -0
- deep_search/search_o1/scripts/reason_one_model.py +915 -0
- deep_search/search_o1/scripts/run_search_o1.py +752 -0
- deep_search/search_o1/scripts/run_search_o1_2.py +776 -0
- deep_search/search_o1/scripts/run_search_o1_test.py +203 -0
- deep_search/search_o1/scripts/search_o1_sum_all_webpage.py +982 -0
- deep_search/search_o1/scripts/search_o1_sum_single_page_test_new_prompt.py +997 -0
- deep_search/search_o1/scripts/vllm_serve_test.py +21 -0
- deep_search/sft.py +204 -0
OpenManus/.gitattributes
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# HTML code is incorrectly calculated into statistics, so ignore them
|
| 2 |
+
*.html linguist-detectable=false
|
| 3 |
+
# Auto detect text files and perform LF normalization
|
| 4 |
+
* text=auto eol=lf
|
| 5 |
+
# Ensure shell scripts use LF (Linux style) line endings on Windows
|
| 6 |
+
*.sh text eol=lf
|
| 7 |
+
# Treat specific binary files as binary and prevent line ending conversion
|
| 8 |
+
*.png binary
|
| 9 |
+
*.jpg binary
|
| 10 |
+
*.gif binary
|
| 11 |
+
*.ico binary
|
| 12 |
+
*.jpeg binary
|
| 13 |
+
*.mp3 binary
|
| 14 |
+
*.zip binary
|
| 15 |
+
*.bin binary
|
| 16 |
+
# Preserve original line endings for specific document files
|
| 17 |
+
*.doc text eol=crlf
|
| 18 |
+
*.docx text eol=crlf
|
| 19 |
+
*.pdf binary
|
| 20 |
+
# Ensure source code and script files use LF line endings
|
| 21 |
+
*.py text eol=lf
|
| 22 |
+
*.js text eol=lf
|
| 23 |
+
*.html text eol=lf
|
| 24 |
+
*.css text eol=lf
|
| 25 |
+
# Specify custom diff driver for specific file types
|
| 26 |
+
*.md diff=markdown
|
| 27 |
+
*.json diff=json
|
| 28 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.mov filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
OpenManus/.gitignore
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
# PyInstaller
|
| 30 |
+
# Usually these files are written by a python script from a template
|
| 31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 32 |
+
*.manifest
|
| 33 |
+
*.spec
|
| 34 |
+
|
| 35 |
+
# Installer logs
|
| 36 |
+
pip-log.txt
|
| 37 |
+
pip-delete-this-directory.txt
|
| 38 |
+
|
| 39 |
+
# Unit test / coverage reports
|
| 40 |
+
htmlcov/
|
| 41 |
+
.tox/
|
| 42 |
+
.nox/
|
| 43 |
+
.coverage
|
| 44 |
+
.coverage.*
|
| 45 |
+
.cache
|
| 46 |
+
nosetests.xml
|
| 47 |
+
coverage.xml
|
| 48 |
+
*.cover
|
| 49 |
+
*.py,cover
|
| 50 |
+
.hypothesis/
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
cover/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
.pybuilder/
|
| 76 |
+
target/
|
| 77 |
+
|
| 78 |
+
# Jupyter Notebook
|
| 79 |
+
.ipynb_checkpoints
|
| 80 |
+
|
| 81 |
+
# IPython
|
| 82 |
+
profile_default/
|
| 83 |
+
ipython_config.py
|
| 84 |
+
|
| 85 |
+
# pyenv
|
| 86 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 88 |
+
# .python-version
|
| 89 |
+
|
| 90 |
+
# pipenv
|
| 91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 94 |
+
# install all needed dependencies.
|
| 95 |
+
#Pipfile.lock
|
| 96 |
+
|
| 97 |
+
# UV
|
| 98 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 100 |
+
# commonly ignored for libraries.
|
| 101 |
+
#uv.lock
|
| 102 |
+
|
| 103 |
+
# poetry
|
| 104 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 105 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 106 |
+
# commonly ignored for libraries.
|
| 107 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 108 |
+
#poetry.lock
|
| 109 |
+
|
| 110 |
+
# pdm
|
| 111 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 112 |
+
#pdm.lock
|
| 113 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 114 |
+
# in version control.
|
| 115 |
+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
| 116 |
+
.pdm.toml
|
| 117 |
+
.pdm-python
|
| 118 |
+
.pdm-build/
|
| 119 |
+
|
| 120 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 121 |
+
__pypackages__/
|
| 122 |
+
|
| 123 |
+
# Celery stuff
|
| 124 |
+
celerybeat-schedule
|
| 125 |
+
celerybeat.pid
|
| 126 |
+
|
| 127 |
+
# SageMath parsed files
|
| 128 |
+
*.sage.py
|
| 129 |
+
|
| 130 |
+
# Environments
|
| 131 |
+
.env
|
| 132 |
+
.venv
|
| 133 |
+
env/
|
| 134 |
+
venv/
|
| 135 |
+
ENV/
|
| 136 |
+
env.bak/
|
| 137 |
+
venv.bak/
|
| 138 |
+
|
| 139 |
+
# Spyder project settings
|
| 140 |
+
.spyderproject
|
| 141 |
+
.spyproject
|
| 142 |
+
|
| 143 |
+
# Rope project settings
|
| 144 |
+
.ropeproject
|
| 145 |
+
|
| 146 |
+
# mkdocs documentation
|
| 147 |
+
/site
|
| 148 |
+
|
| 149 |
+
# mypy
|
| 150 |
+
.mypy_cache/
|
| 151 |
+
.dmypy.json
|
| 152 |
+
dmypy.json
|
| 153 |
+
|
| 154 |
+
# Pyre type checker
|
| 155 |
+
.pyre/
|
| 156 |
+
|
| 157 |
+
# pytype static type analyzer
|
| 158 |
+
.pytype/
|
| 159 |
+
|
| 160 |
+
# Cython debug symbols
|
| 161 |
+
cython_debug/
|
| 162 |
+
|
| 163 |
+
# PyCharm
|
| 164 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 165 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 166 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 167 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 168 |
+
.idea/
|
| 169 |
+
|
| 170 |
+
# PyPI configuration file
|
| 171 |
+
.pypirc
|
| 172 |
+
|
| 173 |
+
# Logs
|
| 174 |
+
logs/
|
| 175 |
+
|
| 176 |
+
# Data
|
| 177 |
+
data/
|
| 178 |
+
|
| 179 |
+
# Workspace
|
| 180 |
+
workspace/
|
OpenManus/.pre-commit-config.yaml
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
repos:
|
| 2 |
+
- repo: https://github.com/psf/black
|
| 3 |
+
rev: 23.1.0
|
| 4 |
+
hooks:
|
| 5 |
+
- id: black
|
| 6 |
+
|
| 7 |
+
- repo: https://github.com/pre-commit/pre-commit-hooks
|
| 8 |
+
rev: v4.4.0
|
| 9 |
+
hooks:
|
| 10 |
+
- id: trailing-whitespace
|
| 11 |
+
- id: end-of-file-fixer
|
| 12 |
+
- id: check-yaml
|
| 13 |
+
- id: check-added-large-files
|
| 14 |
+
|
| 15 |
+
- repo: https://github.com/PyCQA/autoflake
|
| 16 |
+
rev: v2.0.1
|
| 17 |
+
hooks:
|
| 18 |
+
- id: autoflake
|
| 19 |
+
args: [
|
| 20 |
+
--remove-all-unused-imports,
|
| 21 |
+
--ignore-init-module-imports,
|
| 22 |
+
--expand-star-imports,
|
| 23 |
+
--remove-duplicate-keys,
|
| 24 |
+
--remove-unused-variables,
|
| 25 |
+
--recursive,
|
| 26 |
+
--in-place,
|
| 27 |
+
--exclude=__init__.py,
|
| 28 |
+
]
|
| 29 |
+
files: \.py$
|
| 30 |
+
|
| 31 |
+
- repo: https://github.com/pycqa/isort
|
| 32 |
+
rev: 5.12.0
|
| 33 |
+
hooks:
|
| 34 |
+
- id: isort
|
| 35 |
+
args: [
|
| 36 |
+
"--profile", "black",
|
| 37 |
+
"--filter-files",
|
| 38 |
+
"--lines-after-imports=2",
|
| 39 |
+
]
|
OpenManus/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 manna_and_poem
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
OpenManus/README.md
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
English | [繁體中文](README_tw.md) | [简体中文](README_zh.md)
|
| 2 |
+
|
| 3 |
+
<p align="left">
|
| 4 |
+
<a href="https://discord.gg/6dn7Sa3a"><img src="https://dcbadge.vercel.app/api/server/DYn29wFk9z?style=flat" alt="Discord Follow"></a>
|
| 5 |
+
</p>
|
| 6 |
+
|
| 7 |
+
# OpenManus 🙋
|
| 8 |
+
|
| 9 |
+
Manus is incredible, but OpenManus can achieve any ideas without an Invite Code 🛫!
|
| 10 |
+
|
| 11 |
+
Our team members [@mannaandpoem](https://github.com/mannaandpoem) [@XiangJinyu](https://github.com/XiangJinyu) [@MoshiQAQ](https://github.com/MoshiQAQ) [@didiforgithub](https://github.com/didiforgithub) from [@MetaGPT](https://github.com/geekan/MetaGPT) built it within 3 hours!
|
| 12 |
+
|
| 13 |
+
It's a simple implementation, so we welcome any suggestions, contributions, and feedback!
|
| 14 |
+
|
| 15 |
+
Enjoy your own agent with OpenManus!
|
| 16 |
+
|
| 17 |
+
## Project Demo
|
| 18 |
+
|
| 19 |
+
<video src="https://private-user-images.githubusercontent.com/61239030/420168772-6dcfd0d2-9142-45d9-b74e-d10aa75073c6.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NDEzMTgwNTksIm5iZiI6MTc0MTMxNzc1OSwicGF0aCI6Ii82MTIzOTAzMC80MjAxNjg3NzItNmRjZmQwZDItOTE0Mi00NWQ5LWI3NGUtZDEwYWE3NTA3M2M2Lm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAzMDclMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMzA3VDAzMjIzOVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTdiZjFkNjlmYWNjMmEzOTliM2Y3M2VlYjgyNDRlZDJmOWE3NWZhZjE1MzhiZWY4YmQ3NjdkNTYwYTU5ZDA2MzYmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.UuHQCgWYkh0OQq9qsUWqGsUbhG3i9jcZDAMeHjLt5T4" data-canonical-src="https://private-user-images.githubusercontent.com/61239030/420168772-6dcfd0d2-9142-45d9-b74e-d10aa75073c6.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NDEzMTgwNTksIm5iZiI6MTc0MTMxNzc1OSwicGF0aCI6Ii82MTIzOTAzMC80MjAxNjg3NzItNmRjZmQwZDItOTE0Mi00NWQ5LWI3NGUtZDEwYWE3NTA3M2M2Lm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAzMDclMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMzA3VDAzMjIzOVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTdiZjFkNjlmYWNjMmEzOTliM2Y3M2VlYjgyNDRlZDJmOWE3NWZhZjE1MzhiZWY4YmQ3NjdkNTYwYTU5ZDA2MzYmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.UuHQCgWYkh0OQq9qsUWqGsUbhG3i9jcZDAMeHjLt5T4" controls="controls" muted="muted" class="d-block rounded-bottom-2 border-top width-fit" style="max-height:640px; min-height: 200px"></video>
|
| 20 |
+
|
| 21 |
+
## Installation
|
| 22 |
+
|
| 23 |
+
1. Create a new conda environment:
|
| 24 |
+
|
| 25 |
+
```bash
|
| 26 |
+
conda create -n open_manus python=3.12
|
| 27 |
+
conda activate open_manus
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
2. Clone the repository:
|
| 31 |
+
|
| 32 |
+
```bash
|
| 33 |
+
git clone https://github.com/mannaandpoem/OpenManus.git
|
| 34 |
+
cd OpenManus
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
3. Install dependencies:
|
| 38 |
+
|
| 39 |
+
```bash
|
| 40 |
+
pip install -r requirements.txt
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## Configuration
|
| 44 |
+
|
| 45 |
+
OpenManus requires configuration for the LLM APIs it uses. Follow these steps to set up your configuration:
|
| 46 |
+
|
| 47 |
+
1. Create a `config.toml` file in the `config` directory (you can copy from the example):
|
| 48 |
+
|
| 49 |
+
```bash
|
| 50 |
+
cp config/config.example.toml config/config.toml
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
2. Edit `config/config.toml` to add your API keys and customize settings:
|
| 54 |
+
|
| 55 |
+
```toml
|
| 56 |
+
# Global LLM configuration
|
| 57 |
+
[llm]
|
| 58 |
+
model = "gpt-4o"
|
| 59 |
+
base_url = "https://api.openai.com/v1"
|
| 60 |
+
api_key = "sk-..." # Replace with your actual API key
|
| 61 |
+
max_tokens = 4096
|
| 62 |
+
temperature = 0.0
|
| 63 |
+
|
| 64 |
+
# Optional configuration for specific LLM models
|
| 65 |
+
[llm.vision]
|
| 66 |
+
model = "gpt-4o"
|
| 67 |
+
base_url = "https://api.openai.com/v1"
|
| 68 |
+
api_key = "sk-..." # Replace with your actual API key
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
## Quick Start
|
| 72 |
+
|
| 73 |
+
One line for run OpenManus:
|
| 74 |
+
|
| 75 |
+
```bash
|
| 76 |
+
python main.py
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
Then input your idea via terminal!
|
| 80 |
+
|
| 81 |
+
For unstable version, you also can run:
|
| 82 |
+
|
| 83 |
+
```bash
|
| 84 |
+
python run_flow.py
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
## How to contribute
|
| 88 |
+
|
| 89 |
+
We welcome any friendly suggestions and helpful contributions! Just create issues or submit pull requests.
|
| 90 |
+
|
| 91 |
+
Or contact @mannaandpoem via 📧email: mannaandpoem@gmail.com
|
| 92 |
+
|
| 93 |
+
## Roadmap
|
| 94 |
+
|
| 95 |
+
- [ ] Better Planning
|
| 96 |
+
- [ ] Live Demos
|
| 97 |
+
- [ ] Replay
|
| 98 |
+
- [ ] RL Fine-tuned Models
|
| 99 |
+
- [ ] Comprehensive Benchmarks
|
| 100 |
+
|
| 101 |
+
## Community Group
|
| 102 |
+
|
| 103 |
+
Join our networking group on Feishu and share your experience with other developers!
|
| 104 |
+
|
| 105 |
+
<div align="center" style="display: flex; gap: 20px;">
|
| 106 |
+
<img src="assets/community_group.jpg" alt="OpenManus 交流群" width="300" />
|
| 107 |
+
</div>
|
| 108 |
+
|
| 109 |
+
## Star History
|
| 110 |
+
|
| 111 |
+
[](https://star-history.com/#mannaandpoem/OpenManus&Date)
|
| 112 |
+
|
| 113 |
+
## Acknowledgement
|
| 114 |
+
|
| 115 |
+
Thanks to [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo) and [broswer-use](https://github.com/browser-use/browser-use) for providing basic support for this project!
|
| 116 |
+
|
| 117 |
+
OpenManus is built by contributors from MetaGPT. Huge thanks to this agent community!
|
OpenManus/README_tw.md
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[English](README.md) | 繁體中文 | [简体中文](README_zh.md)
|
| 2 |
+
|
| 3 |
+
<p align="left">
|
| 4 |
+
<a href="https://discord.gg/6dn7Sa3a"><img src="https://dcbadge.vercel.app/api/server/DYn29wFk9z?style=flat" alt="Discord Follow"></a>
|
| 5 |
+
</p>
|
| 6 |
+
|
| 7 |
+
# OpenManus 🙋
|
| 8 |
+
|
| 9 |
+
Manus 很棒,但 OpenManus 可以在沒有邀請碼的情況下實現任何想法 🛫!
|
| 10 |
+
|
| 11 |
+
我們的團隊成員 [@mannaandpoem](https://github.com/mannaandpoem) [@XiangJinyu](https://github.com/XiangJinyu) [@MoshiQAQ](https://github.com/MoshiQAQ) [@didiforgithub](https://github.com/didiforgithub) 來自 [@MetaGPT](https://github.com/geekan/MetaGPT) 在 3 小時內完成了它!
|
| 12 |
+
|
| 13 |
+
這是一個簡單的實現,所以我們歡迎任何建議、貢獻和反饋!
|
| 14 |
+
|
| 15 |
+
享受使用 OpenManus 的代理吧!
|
| 16 |
+
|
| 17 |
+
## 項目演示
|
| 18 |
+
|
| 19 |
+
<video src="https://private-user-images.githubusercontent.com/61239030/420168772-6dcfd0d2-9142-45d9-b74e-d10aa75073c6.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NDEzMTgwNTksIm5iZiI6MTc0MTMxNzc1OSwicGF0aCI6Ii82MTIzOTAzMC80MjAxNjg3NzItNmRjZmQwZDItOTE0Mi00NWQ5LWI3NGUtZDEwYWE3NTA3M2M2Lm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAzMDclMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMzA3VDAzMjIzOVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTdiZjFkNjlmYWNjMmEzOTliM2Y3M2VlYjgyNDRlZDJmOWE3NWZhZjE1MzhiZWY4YmQ3NjdkNTYwYTU5ZDA2MzYmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.UuHQCgWYkh0OQq9qsUWqGsUbhG3i9jcZDAMeHjLt5T4" data-canonical-src="https://private-user-images.githubusercontent.com/61239030/420168772-6dcfd0d2-9142-45d9-b74e-d10aa75073c6.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NDEzMTgwNTksIm5iZiI6MTc0MTMxNzc1OSwicGF0aCI6Ii82MTIzOTAzMC80MjAxNjg3NzItNmRjZmQwZDItOTE0Mi00NWQ5LWI3NGUtZDEwYWE3NTA3M2M2Lm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAzMDclMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMzA3VDAzMjIzOVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTdiZjFkNjlmYWNjMmEzOTliM2Y3M2VlYjgyNDRlZDJmOWE3NWZhZjE1MzhiZWY4YmQ3NjdkNTYwYTU5ZDA2MzYmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.UuHQCgWYkh0OQq9qsUWqGsUbhG3i9jcZDAMeHjLt5T4" controls="controls" muted="muted" class="d-block rounded-bottom-2 border-top width-fit" style="max-height:640px; min-height: 200px"></video>
|
| 20 |
+
|
| 21 |
+
## 安裝
|
| 22 |
+
|
| 23 |
+
1. 創建一個新的 conda 環境:
|
| 24 |
+
|
| 25 |
+
```bash
|
| 26 |
+
conda create -n open_manus python=3.12
|
| 27 |
+
conda activate open_manus
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
2. 克隆倉庫:
|
| 31 |
+
|
| 32 |
+
```bash
|
| 33 |
+
git clone https://github.com/mannaandpoem/OpenManus.git
|
| 34 |
+
cd OpenManus
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
3. 安裝依賴:
|
| 38 |
+
|
| 39 |
+
```bash
|
| 40 |
+
pip install -r requirements.txt
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## 配置
|
| 44 |
+
|
| 45 |
+
OpenManus 需要配置其使用的 LLM API。按照以下步驟設置您的配置:
|
| 46 |
+
|
| 47 |
+
1. 在 `config` 目錄中創建一個 `config.toml` 文件(您可以從示例中複製):
|
| 48 |
+
|
| 49 |
+
```bash
|
| 50 |
+
cp config/config.example.toml config/config.toml
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
2. 編輯 `config/config.toml` 以添加您的 API 密鑰並自定義設置:
|
| 54 |
+
|
| 55 |
+
```toml
|
| 56 |
+
# 全局 LLM 配置
|
| 57 |
+
[llm]
|
| 58 |
+
model = "gpt-4o"
|
| 59 |
+
base_url = "https://api.openai.com/v1"
|
| 60 |
+
api_key = "sk-..." # 替換為您的實際 API 密鑰
|
| 61 |
+
max_tokens = 4096
|
| 62 |
+
temperature = 0.0
|
| 63 |
+
|
| 64 |
+
# 特定 LLM 模型的可選配置
|
| 65 |
+
[llm.vision]
|
| 66 |
+
model = "gpt-4o"
|
| 67 |
+
base_url = "https://api.openai.com/v1"
|
| 68 |
+
api_key = "sk-..." # 替換為您的實際 API 密鑰
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
## 快速開始
|
| 72 |
+
|
| 73 |
+
一行命令運行 OpenManus:
|
| 74 |
+
|
| 75 |
+
```bash
|
| 76 |
+
python main.py
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
然後通過終端輸入您的想法!
|
| 80 |
+
|
| 81 |
+
對於不穩定版本,您也可以運行:
|
| 82 |
+
|
| 83 |
+
```bash
|
| 84 |
+
python run_flow.py
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
## 如何貢獻
|
| 88 |
+
|
| 89 |
+
我們歡迎任何友好的建議和有幫助的貢獻!只需創建問題或提交拉取請求。
|
| 90 |
+
|
| 91 |
+
或者通過 📧 電子郵件聯繫 @mannaandpoem:mannaandpoem@gmail.com
|
| 92 |
+
|
| 93 |
+
## 路線圖
|
| 94 |
+
|
| 95 |
+
- [ ] 更好的規劃
|
| 96 |
+
- [ ] 實時演示
|
| 97 |
+
- [ ] 重播
|
| 98 |
+
- [ ] RL 微調模型
|
| 99 |
+
- [ ] 綜合基準測試
|
| 100 |
+
|
| 101 |
+
## 社區群組
|
| 102 |
+
|
| 103 |
+
加入我們的飛書網絡群組,與其他開發者分享您的經驗!
|
| 104 |
+
|
| 105 |
+
<div align="center" style="display: flex; gap: 20px;">
|
| 106 |
+
<img src="assets/community_group.jpg" alt="OpenManus 交流群" width="300" />
|
| 107 |
+
</div>
|
| 108 |
+
|
| 109 |
+
## 星標歷史
|
| 110 |
+
|
| 111 |
+
[](https://star-history.com/#mannaandpoem/OpenManus&Date)
|
| 112 |
+
|
| 113 |
+
## 致謝
|
| 114 |
+
|
| 115 |
+
感謝 [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo) 和 [broswer-use](https://github.com/browser-use/browser-use) 為本項目提供基本支持!
|
| 116 |
+
|
| 117 |
+
OpenManus 由 MetaGPT 的貢獻者構建。非常感謝這個代理社區!
|
OpenManus/README_zh.md
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[English](README.md) | [繁體中文](README_tw.md) | 简体中文
|
| 2 |
+
|
| 3 |
+
<p align="left">
|
| 4 |
+
<a href="https://discord.gg/6dn7Sa3a"><img src="https://dcbadge.vercel.app/api/server/DYn29wFk9z?style=flat" alt="Discord Follow"></a>
|
| 5 |
+
</p>
|
| 6 |
+
|
| 7 |
+
# OpenManus 🙋
|
| 8 |
+
|
| 9 |
+
Manus 非常棒,但 OpenManus 无需邀请码即可实现任何创意 🛫!
|
| 10 |
+
|
| 11 |
+
我们来自 [@MetaGPT](https://github.com/geekan/MetaGPT) 的团队成员 [@mannaandpoem](https://github.com/mannaandpoem) [@XiangJinyu](https://github.com/XiangJinyu) [@MoshiQAQ](https://github.com/MoshiQAQ) [@didiforgithub](https://github.com/didiforgithub) 在 3 小时内完成了开发!
|
| 12 |
+
|
| 13 |
+
这是一个简洁的实现方案,欢迎任何建议、贡献和反馈!
|
| 14 |
+
|
| 15 |
+
用 OpenManus 开启你的智能体之旅吧!
|
| 16 |
+
|
| 17 |
+
## 项目演示
|
| 18 |
+
|
| 19 |
+
<video src="https://private-user-images.githubusercontent.com/61239030/420168772-6dcfd0d2-9142-45d9-b74e-d10aa75073c6.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NDEzMTgwNTksIm5iZiI6MTc0MTMxNzc1OSwicGF0aCI6Ii82MTIzOTAzMC80MjAxNjg3NzItNmRjZmQwZDItOTE0Mi00NWQ5LWI3NGUtZDEwYWE3NTA3M2M2Lm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAzMDclMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMzA3VDAzMjIzOVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTdiZjFkNjlmYWNjMmEzOTliM2Y3M2VlYjgyNDRlZDJmOWE3NWZhZjE1MzhiZWY4YmQ3NjdkNTYwYTU5ZDA2MzYmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.UuHQCgWYkh0OQq9qsUWqGsUbhG3i9jcZDAMeHjLt5T4" data-canonical-src="https://private-user-images.githubusercontent.com/61239030/420168772-6dcfd0d2-9142-45d9-b74e-d10aa75073c6.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NDEzMTgwNTksIm5iZiI6MTc0MTMxNzc1OSwicGF0aCI6Ii82MTIzOTAzMC80MjAxNjg3NzItNmRjZmQwZDItOTE0Mi00NWQ5LWI3NGUtZDEwYWE3NTA3M2M2Lm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAzMDclMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMzA3VDAzMjIzOVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTdiZjFkNjlmYWNjMmEzOTliM2Y3M2VlYjgyNDRlZDJmOWE3NWZhZjE1MzhiZWY4YmQ3NjdkNTYwYTU5ZDA2MzYmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.UuHQCgWYkh0OQq9qsUWqGsUbhG3i9jcZDAMeHjLt5T4" controls="controls" muted="muted" class="d-block rounded-bottom-2 border-top width-fit" style="max-height:640px; min-height: 200px"></video>
|
| 20 |
+
|
| 21 |
+
## 安装指南
|
| 22 |
+
|
| 23 |
+
1. 创建新的 conda 环境:
|
| 24 |
+
|
| 25 |
+
```bash
|
| 26 |
+
conda create -n open_manus python=3.12
|
| 27 |
+
conda activate open_manus
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
2. 克隆仓库:
|
| 31 |
+
|
| 32 |
+
```bash
|
| 33 |
+
git clone https://github.com/mannaandpoem/OpenManus.git
|
| 34 |
+
cd OpenManus
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
3. 安装依赖:
|
| 38 |
+
|
| 39 |
+
```bash
|
| 40 |
+
pip install -r requirements.txt
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## 配置说明
|
| 44 |
+
|
| 45 |
+
OpenManus 需要配置使用的 LLM API,请按以下步骤设置:
|
| 46 |
+
|
| 47 |
+
1. 在 `config` 目录创建 `config.toml` 文件(可从示例复制):
|
| 48 |
+
|
| 49 |
+
```bash
|
| 50 |
+
cp config/config.example.toml config/config.toml
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
2. 编辑 `config/config.toml` 添加 API 密钥和自定义设置:
|
| 54 |
+
|
| 55 |
+
```toml
|
| 56 |
+
# 全局 LLM 配置
|
| 57 |
+
[llm]
|
| 58 |
+
model = "gpt-4o"
|
| 59 |
+
base_url = "https://api.openai.com/v1"
|
| 60 |
+
api_key = "sk-..." # 替换为真实 API 密钥
|
| 61 |
+
max_tokens = 4096
|
| 62 |
+
temperature = 0.0
|
| 63 |
+
|
| 64 |
+
# 可选特定 LLM 模型配置
|
| 65 |
+
[llm.vision]
|
| 66 |
+
model = "gpt-4o"
|
| 67 |
+
base_url = "https://api.openai.com/v1"
|
| 68 |
+
api_key = "sk-..." # 替换为真实 API 密钥
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
## 快速启动
|
| 72 |
+
|
| 73 |
+
一行命令运行 OpenManus:
|
| 74 |
+
|
| 75 |
+
```bash
|
| 76 |
+
python main.py
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
然后通过终端输入你的创意!
|
| 80 |
+
|
| 81 |
+
如需体验开发中版本,可运行:
|
| 82 |
+
|
| 83 |
+
```bash
|
| 84 |
+
python run_flow.py
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
## 贡献指南
|
| 88 |
+
|
| 89 |
+
我们欢迎任何友好的建议和有价值的贡献!可以直接创建 issue 或提交 pull request。
|
| 90 |
+
|
| 91 |
+
或通过 📧 邮件联系 @mannaandpoem:mannaandpoem@gmail.com
|
| 92 |
+
|
| 93 |
+
## 发展路线
|
| 94 |
+
|
| 95 |
+
- [ ] 更优的规划系统
|
| 96 |
+
- [ ] 实时演示功能
|
| 97 |
+
- [ ] 运行回放
|
| 98 |
+
- [ ] 强化学习微调模型
|
| 99 |
+
- [ ] 全面的性能基准测试
|
| 100 |
+
|
| 101 |
+
## 交流群
|
| 102 |
+
|
| 103 |
+
加入我们的飞书交流群,与其他开发者分享经验!
|
| 104 |
+
|
| 105 |
+
<div align="center" style="display: flex; gap: 20px;">
|
| 106 |
+
<img src="assets/community_group.jpg" alt="OpenManus 交流群" width="300" />
|
| 107 |
+
</div>
|
| 108 |
+
|
| 109 |
+
## Star 数量
|
| 110 |
+
|
| 111 |
+
[](https://star-history.com/#mannaandpoem/OpenManus&Date)
|
| 112 |
+
|
| 113 |
+
## 致谢
|
| 114 |
+
|
| 115 |
+
特别感谢 [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo) 和 [broswer-use](https://github.com/browser-use/browser-use) 为本项目提供的基础支持!
|
| 116 |
+
|
| 117 |
+
OpenManus 由 MetaGPT 社区的贡献者共同构建,感谢这个充满活力的智能体开发者社区!
|
OpenManus/main.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio # asyncio 是一个用于编写异步程序的标准库,支持协程(coroutines)和事件循环。它允许在单线程中并发执行多个任务
|
| 2 |
+
|
| 3 |
+
from app.agent.manus import Manus
|
| 4 |
+
from app.logger import logger
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
async def main():
    """Interactive REPL: read prompts from stdin and run each through a Manus agent.

    Loops until the user types 'exit' (case-insensitive) or presses Ctrl+C.
    """
    agent = Manus()
    while True:
        try:
            prompt = input("Enter your prompt (or 'exit' to quit): ")
            if prompt.lower() == "exit":
                logger.info("Goodbye!")
                break
            # Robustness: skip blank input instead of dispatching an empty task.
            if not prompt.strip():
                continue
            logger.warning("Processing your request...")
            # agent.run is a coroutine, so it must be awaited to actually execute.
            await agent.run(prompt)
        except KeyboardInterrupt:
            # Ctrl+C exits cleanly rather than dumping a traceback.
            logger.warning("Goodbye!")
            break
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
if __name__ == "__main__":
    # asyncio.run() (Python 3.7+) starts an event loop, runs the main()
    # coroutine to completion, and blocks the current thread until it finishes.
    asyncio.run(main())
|
OpenManus/requirements.txt
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
pydantic~=2.10.4
|
| 2 |
+
openai~=1.58.1
|
| 3 |
+
tenacity~=9.0.0
|
| 4 |
+
pyyaml~=6.0.2
|
| 5 |
+
loguru~=0.7.3
|
| 6 |
+
numpy
|
| 7 |
+
datasets~=3.2.0
|
| 8 |
+
|
| 9 |
+
html2text~=2024.2.26
|
| 10 |
+
gymnasium~=1.0.0
|
| 11 |
+
pillow~=10.4.0
|
| 12 |
+
browsergym~=0.13.3
|
| 13 |
+
uvicorn~=0.34.0
|
| 14 |
+
unidiff~=0.7.5
|
| 15 |
+
browser-use~=0.1.40
|
| 16 |
+
googlesearch-python~=1.3.0
|
| 17 |
+
|
| 18 |
+
aiofiles~=24.1.0
|
| 19 |
+
pydantic_core~=2.27.2
|
| 20 |
+
colorama~=0.4.6
|
| 21 |
+
playwright~=1.49.1
|
OpenManus/run_flow.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
|
| 3 |
+
from app.agent.manus import Manus
|
| 4 |
+
from app.flow.base import FlowType
|
| 5 |
+
from app.flow.flow_factory import FlowFactory
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
async def run_flow():
    """Interactive loop: build a planning flow for each prompt and execute it.

    Exits when the user types 'exit' (case-insensitive) or presses Ctrl+C.
    """
    agent = Manus()

    while True:
        try:
            prompt = input("Enter your prompt (or 'exit' to quit): ")
            if prompt.lower() == "exit":
                print("Goodbye!")
                break
            # Robustness: skip blank input instead of launching a flow on an empty task.
            if not prompt.strip():
                continue

            # A fresh flow is built per prompt so each request is planned independently.
            flow = FlowFactory.create_flow(
                flow_type=FlowType.PLANNING,
                agents=agent,
            )

            print("Processing your request...")
            result = await flow.execute(prompt)
            print(result)

        except KeyboardInterrupt:
            print("Goodbye!")
            break
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
if __name__ == "__main__":
    # Start the asyncio event loop and run the interactive flow until it exits.
    asyncio.run(run_flow())
|
OpenManus/setup.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from setuptools import find_packages, setup


# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="openmanus",
    version="0.1.0",
    author="mannaandpoem and OpenManus Team",
    author_email="mannaandpoem@gmail.com",
    description="A versatile agent that can solve various tasks using multiple tools",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mannaandpoem/OpenManus",
    packages=find_packages(),
    # NOTE: keep this list in sync with requirements.txt.
    install_requires=[
        "pydantic~=2.10.4",
        "openai~=1.58.1",
        "tenacity~=9.0.0",
        "pyyaml~=6.0.2",
        "loguru~=0.7.3",
        "numpy",
        "datasets~=3.2.0",
        "html2text~=2024.2.26",
        "gymnasium~=1.0.0",
        "pillow~=10.4.0",
        "browsergym~=0.13.3",
        "uvicorn~=0.34.0",
        "unidiff~=0.7.5",
        "browser-use~=0.1.40",
        "googlesearch-python~=1.3.0",
        "aiofiles~=24.1.0",
        "pydantic_core~=2.27.2",
        "colorama~=0.4.6",
        # Present in requirements.txt but was missing here; added for consistency.
        "playwright~=1.49.1",
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.12",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.12",
    entry_points={
        "console_scripts": [
            # NOTE(review): main.main is an async coroutine function; invoking it
            # directly from a console script returns an un-awaited coroutine.
            # Confirm a sync wrapper exists or is intended before relying on this.
            "openmanus=main:main",
        ],
    },
)
|
deep_search/3-12-1200qa_doc_by_itself_qwen.log
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [108, 93, 75, 56, 37, 18]
|
| 2 |
+
step_list: [108, 93]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/checkpoint-108) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name simpleqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/108/simpleqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --subset_num 500 --model_path /share/project/sunshuang/deep_search/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/108/simpleqa/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/checkpoint-93) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name simpleqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/93/simpleqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --subset_num 500 --model_path /share/project/sunshuang/deep_search/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/checkpoint-93 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/93/simpleqa/inf.log 2>&1 &
|
| 9 |
+
All checkpoints exist. Wait for runing...
|
| 10 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 11 |
+
The following command is about to run:
|
| 12 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name simpleqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/108/simpleqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --subset_num 500 --model_path /share/project/sunshuang/deep_search/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/108/simpleqa/inf.log 2>&1 &
|
| 13 |
+
The following command is about to run:
|
| 14 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name simpleqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/93/simpleqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --subset_num 500 --model_path /share/project/sunshuang/deep_search/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/checkpoint-93 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4/93/simpleqa/inf.log 2>&1 &
|
| 15 |
+
Wish me good luck!
|
deep_search/3-12-1200qa_doc_by_itself_qwq.log
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [108, 93, 75, 56, 37, 18]
|
| 3 |
+
step_list: [108, 93]
|
| 4 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/checkpoint-108) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name simpleqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/108/simpleqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --subset_num 500 --model_path /share/project/sunshuang/deep_search/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/108/simpleqa/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/checkpoint-93) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name simpleqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/93/simpleqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --subset_num 500 --model_path /share/project/sunshuang/deep_search/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/checkpoint-93 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/93/simpleqa/inf.log 2>&1 &
|
| 10 |
+
All checkpoints exist. Wait for runing...
|
| 11 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 12 |
+
The following command is about to run:
|
| 13 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name simpleqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/108/simpleqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --subset_num 500 --model_path /share/project/sunshuang/deep_search/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/108/simpleqa/inf.log 2>&1 &
|
| 14 |
+
The following command is about to run:
|
| 15 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name simpleqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/93/simpleqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --subset_num 500 --model_path /share/project/sunshuang/deep_search/output/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/checkpoint-93 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/JOB:5701#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4/93/simpleqa/inf.log 2>&1 &
|
| 16 |
+
Wish me good luck!
|
deep_search/3-15-correct_solutions_sft_add_prompt_doc_by_itself_qwq.log
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [23, 46, 69, 93]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-23) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/23/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/23/eval/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-46) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/46/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/46/eval/inf.log 2>&1 &
|
| 9 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-69) to exist...
|
| 10 |
+
The checkpoint exists. Waiting for running...
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/69/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/69/eval/inf.log 2>&1 &
|
| 12 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-93) to exist...
|
| 13 |
+
available_gpus: [4, 5, 6, 7]
|
| 14 |
+
The following command is about to run:
|
| 15 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/23/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/23/eval/inf.log 2>&1 &
|
| 16 |
+
The following command is about to run:
|
| 17 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/46/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/46/eval/inf.log 2>&1 &
|
| 18 |
+
Already waiting 0:02:00.
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/93/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-93 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/93/eval/inf.log 2>&1 &
|
| 21 |
+
All checkpoints exist. Wait for runing...
|
| 22 |
+
available_gpus: []
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: [6, 7]
|
| 73 |
+
The following command is about to run:
|
| 74 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/69/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/69/eval/inf.log 2>&1 &
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: [4, 5]
|
| 78 |
+
The following command is about to run:
|
| 79 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/93/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-93 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/93/eval/inf.log 2>&1 &
|
| 80 |
+
Wish me good luck!
|
deep_search/3-18-2kqa_doc_by_itself_DeepSeek-R1-Distill-Qwen-32B.log
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [108, 91, 73, 54, 36, 18]
|
| 2 |
+
step_list: [108, 91, 73]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval/inf.log 2>&1 &
|
| 9 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73) to exist...
|
| 10 |
+
The checkpoint exists. Waiting for running...
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval/inf.log 2>&1 &
|
| 12 |
+
All checkpoints exist. Wait for runing...
|
| 13 |
+
available_gpus: [6, 7]
|
| 14 |
+
The following command is about to run:
|
| 15 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval/inf.log 2>&1 &
|
| 16 |
+
available_gpus: []
|
| 17 |
+
available_gpus: []
|
| 18 |
+
available_gpus: []
|
| 19 |
+
available_gpus: []
|
| 20 |
+
available_gpus: []
|
| 21 |
+
available_gpus: []
|
| 22 |
+
available_gpus: []
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: [2, 3, 7]
|
| 57 |
+
The following command is about to run:
|
| 58 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval/inf.log 2>&1 &
|
| 59 |
+
available_gpus: [6, 7]
|
| 60 |
+
The following command is about to run:
|
| 61 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval/inf.log 2>&1 &
|
| 62 |
+
Wish me good luck!
|
deep_search/3-18-2kqa_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [108, 91, 73, 54, 36, 18]
|
| 2 |
+
step_list: [108, 91, 73]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval/inf.log 2>&1 &
|
| 9 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73) to exist...
|
| 10 |
+
The checkpoint exists. Waiting for running...
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval/inf.log 2>&1 &
|
| 12 |
+
All checkpoints exist. Wait for runing...
|
| 13 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 14 |
+
The following command is about to run:
|
| 15 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval/inf.log 2>&1 &
|
| 16 |
+
The following command is about to run:
|
| 17 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval/inf.log 2>&1 &
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval/inf.log 2>&1 &
|
| 20 |
+
Wish me good luck!
|
deep_search/3-18-2kqa_doc_by_itself_Qwen2.5-32B-Instruct.log
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [108, 91, 73, 54, 36, 18]
|
| 2 |
+
step_list: [108, 91, 73]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval/inf.log 2>&1 &
|
| 9 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73) to exist...
|
| 10 |
+
The checkpoint exists. Waiting for running...
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval/inf.log 2>&1 &
|
| 12 |
+
All checkpoints exist. Wait for runing...
|
| 13 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 14 |
+
The following command is about to run:
|
| 15 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/108/eval/inf.log 2>&1 &
|
| 16 |
+
The following command is about to run:
|
| 17 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/91/eval/inf.log 2>&1 &
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/73/eval/inf.log 2>&1 &
|
| 20 |
+
Wish me good luck!
|
deep_search/3-20-new_instruction_2k_sft_remove_exp_doc_by_itself_Qwen2.5-7B-Instruct.log
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [108, 91, 73, 54, 36, 18]
|
| 2 |
+
step_list: [108, 91, 73, 54]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-108) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/syn_zh/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/syn_en/inf.log 2>&1 &
|
| 8 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/eval/inf.log 2>&1 &
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/syn_zh/inf.log 2>&1 &
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/syn_en/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/eval/inf.log 2>&1 &
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_zh/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_en/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-54) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/eval/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/syn_zh/inf.log 2>&1 &
|
| 22 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/syn_en/inf.log 2>&1 &
|
| 23 |
+
All checkpoints exist. Wait for running...
|
| 24 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 25 |
+
The following command is about to run:
|
| 26 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/eval/inf.log 2>&1 &
|
| 27 |
+
The following command is about to run:
|
| 28 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/syn_zh/inf.log 2>&1 &
|
| 29 |
+
The following command is about to run:
|
| 30 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/108/syn_en/inf.log 2>&1 &
|
| 31 |
+
The following command is about to run:
|
| 32 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/eval/inf.log 2>&1 &
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: [2, 3]
|
| 43 |
+
The following command is about to run:
|
| 44 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/syn_zh/inf.log 2>&1 &
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: [4, 5]
|
| 47 |
+
The following command is about to run:
|
| 48 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/syn_en/inf.log 2>&1 &
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: [2, 3]
|
| 55 |
+
The following command is about to run:
|
| 56 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/eval/inf.log 2>&1 &
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: [4, 5]
|
| 62 |
+
The following command is about to run:
|
| 63 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_zh/inf.log 2>&1 &
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: [6, 7]
|
| 70 |
+
The following command is about to run:
|
| 71 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_en/inf.log 2>&1 &
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: [5]
|
| 75 |
+
available_gpus: [4, 5]
|
| 76 |
+
The following command is about to run:
|
| 77 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/eval/inf.log 2>&1 &
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: [0, 1]
|
| 82 |
+
The following command is about to run:
|
| 83 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/syn_zh/inf.log 2>&1 &
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: [6, 7]
|
| 87 |
+
The following command is about to run:
|
| 88 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/54/syn_en/inf.log 2>&1 &
|
| 89 |
+
Wish me good luck!
|
deep_search/3-22-merged_2462_nq_237_total_2699_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [252, 211, 169, 126, 84, 42]
|
| 2 |
+
step_list: [252, 211, 169, 126]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-252) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-252 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-252 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/syn_zh/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-252 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/syn_en/inf.log 2>&1 &
|
| 8 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/eval/inf.log 2>&1 &
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/syn_zh/inf.log 2>&1 &
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/syn_en/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-169) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-169 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/eval/inf.log 2>&1 &
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-169 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/syn_zh/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-169 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/syn_en/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-126) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-126 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/eval/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-126 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/syn_zh/inf.log 2>&1 &
|
| 22 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-126 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/syn_en/inf.log 2>&1 &
|
| 23 |
+
All checkpoints exist. Wait for running...
|
| 24 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 25 |
+
The following command is about to run:
|
| 26 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-252 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/eval/inf.log 2>&1 &
|
| 27 |
+
The following command is about to run:
|
| 28 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-252 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/syn_zh/inf.log 2>&1 &
|
| 29 |
+
The following command is about to run:
|
| 30 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-252 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/252/syn_en/inf.log 2>&1 &
|
| 31 |
+
The following command is about to run:
|
| 32 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/eval/inf.log 2>&1 &
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: [4, 5]
|
| 46 |
+
The following command is about to run:
|
| 47 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/syn_zh/inf.log 2>&1 &
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: [2, 3]
|
| 54 |
+
The following command is about to run:
|
| 55 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/syn_en/inf.log 2>&1 &
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: [4, 5]
|
| 65 |
+
The following command is about to run:
|
| 66 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-169 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/eval/inf.log 2>&1 &
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: [2, 3]
|
| 70 |
+
The following command is about to run:
|
| 71 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-169 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/syn_zh/inf.log 2>&1 &
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: [6, 7]
|
| 86 |
+
The following command is about to run:
|
| 87 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-169 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/169/syn_en/inf.log 2>&1 &
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: [2, 3]
|
| 90 |
+
The following command is about to run:
|
| 91 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-126 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/eval/inf.log 2>&1 &
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: [0, 1]
|
| 95 |
+
The following command is about to run:
|
| 96 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_zh --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/syn_zh --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-126 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/syn_zh/inf.log 2>&1 &
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: [6, 7]
|
| 105 |
+
The following command is about to run:
|
| 106 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name syn_en --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/syn_en --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-126 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/126/syn_en/inf.log 2>&1 &
|
| 107 |
+
Wish me good luck!
|
deep_search/3-25-strict_selected_1526_sft_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [144, 120, 96, 72, 48, 24]
|
| 2 |
+
step_list: [144, 120, 96]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn/inf.log 2>&1 &
|
| 15 |
+
All checkpoints exist. Wait for running...
|
| 16 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 17 |
+
The following command is about to run:
|
| 18 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 19 |
+
The following command is about to run:
|
| 20 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn/inf.log 2>&1 &
|
| 21 |
+
The following command is about to run:
|
| 22 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn/inf.log 2>&1 &
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: [2, 3]
|
| 60 |
+
The following command is about to run:
|
| 61 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: [6, 7]
|
| 65 |
+
The following command is about to run:
|
| 66 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn/inf.log 2>&1 &
|
| 67 |
+
Wish me good luck!
|
deep_search/3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-32B-Instruct.log
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [144, 120, 96, 72, 48, 24]
|
| 2 |
+
step_list: [144, 120, 96]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn/inf.log 2>&1 &
|
| 15 |
+
All checkpoints exist. Wait for runing...
|
| 16 |
+
available_gpus: []
|
| 17 |
+
available_gpus: []
|
| 18 |
+
available_gpus: []
|
| 19 |
+
available_gpus: []
|
| 20 |
+
available_gpus: []
|
| 21 |
+
available_gpus: []
|
| 22 |
+
available_gpus: []
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: [2, 3]
|
| 49 |
+
The following command is about to run:
|
| 50 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: [6, 7]
|
| 54 |
+
The following command is about to run:
|
| 55 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn/inf.log 2>&1 &
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: [4, 5]
|
| 62 |
+
The following command is about to run:
|
| 63 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: [0, 1]
|
| 74 |
+
The following command is about to run:
|
| 75 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn/inf.log 2>&1 &
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: []
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: [0, 1]
|
| 97 |
+
The following command is about to run:
|
| 98 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
| 115 |
+
available_gpus: []
|
| 116 |
+
available_gpus: []
|
| 117 |
+
available_gpus: []
|
| 118 |
+
available_gpus: []
|
| 119 |
+
available_gpus: [0, 1]
|
| 120 |
+
The following command is about to run:
|
| 121 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn/inf.log 2>&1 &
|
| 122 |
+
Wish me good luck!
|
deep_search/3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-32B-Instruct_eval.log
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [144, 120, 96, 72, 48, 24]
|
| 3 |
+
step_list: [144, 120, 96]
|
| 4 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
|
| 13 |
+
All checkpoints exist. Wait for runing...
|
| 14 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 15 |
+
The following command is about to run:
|
| 16 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 17 |
+
The following command is about to run:
|
| 18 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 19 |
+
The following command is about to run:
|
| 20 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
|
| 21 |
+
Wish me good luck!
|
deep_search/3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-7B-Instruct.log
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [144, 120, 96, 72, 48, 24]
|
| 2 |
+
step_list: [144, 120, 96]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn/inf.log 2>&1 &
|
| 15 |
+
All checkpoints exist. Wait for runing...
|
| 16 |
+
available_gpus: []
|
| 17 |
+
available_gpus: []
|
| 18 |
+
available_gpus: []
|
| 19 |
+
available_gpus: []
|
| 20 |
+
available_gpus: []
|
| 21 |
+
available_gpus: []
|
| 22 |
+
available_gpus: []
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: [2, 3]
|
| 47 |
+
The following command is about to run:
|
| 48 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: [6, 7]
|
| 52 |
+
The following command is about to run:
|
| 53 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/musique_syn/inf.log 2>&1 &
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: [4, 5]
|
| 60 |
+
The following command is about to run:
|
| 61 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: [0, 1]
|
| 72 |
+
The following command is about to run:
|
| 73 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/musique_syn/inf.log 2>&1 &
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: []
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: [0, 1]
|
| 95 |
+
The following command is about to run:
|
| 96 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
| 115 |
+
available_gpus: []
|
| 116 |
+
available_gpus: [0, 1]
|
| 117 |
+
The following command is about to run:
|
| 118 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name musique_syn --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/musique_syn/inf.log 2>&1 &
|
| 119 |
+
Wish me good luck!
|
deep_search/3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-7B-Instruct_eval.log
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [144, 120, 96, 72, 48, 24]
|
| 3 |
+
step_list: [144, 120, 96]
|
| 4 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
skip evaluated model: JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval
|
| 13 |
+
All checkpoints exist. Wait for runing...
|
| 14 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6]
|
| 15 |
+
The following command is about to run:
|
| 16 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/eval/inf.log 2>&1 &
|
| 17 |
+
The following command is about to run:
|
| 18 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/120/eval/inf.log 2>&1 &
|
| 19 |
+
Wish me good luck!
|
deep_search/3-26_selected_data_1174_sft_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [108, 91, 73, 54, 36, 18]
|
| 2 |
+
step_list: [108, 91, 73]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-108) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/108/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/108/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/108/realqa/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/realqa/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-73) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/73/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/73/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/73/realqa/inf.log 2>&1 &
|
| 15 |
+
All checkpoints exist. Wait for runing...
|
| 16 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 17 |
+
The following command is about to run:
|
| 18 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/108/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/108/eval/inf.log 2>&1 &
|
| 19 |
+
The following command is about to run:
|
| 20 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/108/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-108 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/108/realqa/inf.log 2>&1 &
|
| 21 |
+
The following command is about to run:
|
| 22 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/eval/inf.log 2>&1 &
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/realqa/inf.log 2>&1 &
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: [0, 1, 4, 5]
|
| 66 |
+
The following command is about to run:
|
| 67 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/73/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/73/eval/inf.log 2>&1 &
|
| 68 |
+
The following command is about to run:
|
| 69 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/73/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-73 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/73/realqa/inf.log 2>&1 &
|
| 70 |
+
Wish me good luck!
|
deep_search/3-26_strict_selected_1526_sft_format_ans_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [144, 120, 96, 72, 48, 24]
|
| 2 |
+
step_list: [144, 120, 96]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-144) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/realqa/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-120) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/realqa/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-96) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/realqa/inf.log 2>&1 &
|
| 15 |
+
All checkpoints exist. Wait for runing...
|
| 16 |
+
available_gpus: []
|
| 17 |
+
available_gpus: []
|
| 18 |
+
available_gpus: []
|
| 19 |
+
available_gpus: []
|
| 20 |
+
available_gpus: []
|
| 21 |
+
available_gpus: []
|
| 22 |
+
available_gpus: []
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: [0, 1, 4, 5]
|
| 43 |
+
The following command is about to run:
|
| 44 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/eval/inf.log 2>&1 &
|
| 45 |
+
The following command is about to run:
|
| 46 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/realqa/inf.log 2>&1 &
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: [0, 1]
|
| 50 |
+
The following command is about to run:
|
| 51 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/eval/inf.log 2>&1 &
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: [6, 7]
|
| 58 |
+
The following command is about to run:
|
| 59 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/realqa/inf.log 2>&1 &
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: [3]
|
| 89 |
+
available_gpus: [2, 3]
|
| 90 |
+
The following command is about to run:
|
| 91 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/eval/inf.log 2>&1 &
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: [0, 1]
|
| 97 |
+
The following command is about to run:
|
| 98 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/realqa/inf.log 2>&1 &
|
| 99 |
+
Wish me good luck!
|
deep_search/3-26_strict_selected_1526_sft_format_ans_doc_by_itself_QwQ-32B_1.log
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [144, 120, 96, 72, 48, 24]
|
| 3 |
+
step_list: [144, 120, 96]
|
| 4 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-144) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/eval/inf.log 2>&1 &
|
| 7 |
+
skip evaluated model: JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/realqa
|
| 8 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-120) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
skip evaluated model: JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/eval
|
| 11 |
+
skip evaluated model: JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/120/realqa
|
| 12 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-96) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
skip evaluated model: JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/eval
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/realqa/inf.log 2>&1 &
|
| 16 |
+
All checkpoints exist. Wait for runing...
|
| 17 |
+
available_gpus: [0, 1, 4, 5]
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/144/eval/inf.log 2>&1 &
|
| 20 |
+
The following command is about to run:
|
| 21 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans/96/realqa/inf.log 2>&1 &
|
| 22 |
+
Wish me good luck!
|
deep_search/3-27_merged_syn_long_359_sft_1533_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [144, 120, 96, 72, 48, 24]
|
| 2 |
+
step_list: [144, 120, 96]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/realqa/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-120) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/120/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/120/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/120/realqa/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-96) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/96/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/96/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/96/realqa/inf.log 2>&1 &
|
| 15 |
+
All checkpoints exist. Wait for runing...
|
| 16 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 17 |
+
The following command is about to run:
|
| 18 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/eval/inf.log 2>&1 &
|
| 19 |
+
The following command is about to run:
|
| 20 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/realqa/inf.log 2>&1 &
|
| 21 |
+
The following command is about to run:
|
| 22 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/120/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/120/eval/inf.log 2>&1 &
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/120/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-120 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/120/realqa/inf.log 2>&1 &
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: [2, 3]
|
| 69 |
+
The following command is about to run:
|
| 70 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/96/eval/inf.log 2>&1 &
|
| 71 |
+
available_gpus: [0, 1]
|
| 72 |
+
The following command is about to run:
|
| 73 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/96/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/96/realqa/inf.log 2>&1 &
|
| 74 |
+
Wish me good luck!
|
deep_search/3-27_merged_syn_short_398_sft_1572_doc_by_itself_QwQ-32B_1.log
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [144, 123, 99, 74, 49, 24]
|
| 3 |
+
step_list: [144, 123, 99, 74, 49, 24]
|
| 4 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/checkpoint-144) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/144/eval
|
| 7 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/144/realqa
|
| 8 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/checkpoint-123) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/123/eval
|
| 11 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/123/realqa
|
| 12 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/checkpoint-99) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/99/eval
|
| 15 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/99/realqa
|
| 16 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/checkpoint-74) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/74/eval
|
| 19 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/74/realqa
|
| 20 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/checkpoint-49) to exist...
|
| 21 |
+
The checkpoint exists. Waiting for running...
|
| 22 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/49/eval
|
| 23 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/49/realqa
|
| 24 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/checkpoint-24) to exist...
|
| 25 |
+
The checkpoint exists. Waiting for running...
|
| 26 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/24/eval
|
| 27 |
+
skip evaluated model: JOB:16755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_short_398_sft_1572/24/realqa
|
| 28 |
+
All checkpoints exist. Wait for runing...
|
| 29 |
+
Wish me good luck!
|
deep_search/4-2_cleaned_871_359_total_1065_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [96, 81, 65, 48, 32, 16]
|
| 2 |
+
step_list: [96, 81, 65, 48, 32, 16]
|
| 3 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-96) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/realqa/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/gaia/inf.log 2>&1 &
|
| 8 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-81) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/eval/inf.log 2>&1 &
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/realqa/inf.log 2>&1 &
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/gaia/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-65) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-65 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/eval/inf.log 2>&1 &
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-65 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/realqa/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-65 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/gaia/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-48) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-48 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/eval/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-48 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/realqa/inf.log 2>&1 &
|
| 22 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-48 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/gaia/inf.log 2>&1 &
|
| 23 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-32) to exist...
|
| 24 |
+
The checkpoint exists. Waiting for running...
|
| 25 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-32 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/eval/inf.log 2>&1 &
|
| 26 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-32 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/realqa/inf.log 2>&1 &
|
| 27 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-32 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/gaia/inf.log 2>&1 &
|
| 28 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-16) to exist...
|
| 29 |
+
The checkpoint exists. Waiting for running...
|
| 30 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-16 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/eval/inf.log 2>&1 &
|
| 31 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-16 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/realqa/inf.log 2>&1 &
|
| 32 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-16 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/gaia/inf.log 2>&1 &
|
| 33 |
+
All checkpoints exist. Wait for running...
|
| 34 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 35 |
+
The following command is about to run:
|
| 36 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/eval/inf.log 2>&1 &
|
| 37 |
+
The following command is about to run:
|
| 38 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/realqa/inf.log 2>&1 &
|
| 39 |
+
The following command is about to run:
|
| 40 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-96 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/96/gaia/inf.log 2>&1 &
|
| 41 |
+
The following command is about to run:
|
| 42 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/eval/inf.log 2>&1 &
|
| 43 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 44 |
+
The following command is about to run:
|
| 45 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/realqa/inf.log 2>&1 &
|
| 46 |
+
The following command is about to run:
|
| 47 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/81/gaia/inf.log 2>&1 &
|
| 48 |
+
The following command is about to run:
|
| 49 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-65 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/eval/inf.log 2>&1 &
|
| 50 |
+
The following command is about to run:
|
| 51 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-65 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/realqa/inf.log 2>&1 &
|
| 52 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 53 |
+
The following command is about to run:
|
| 54 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-65 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/65/gaia/inf.log 2>&1 &
|
| 55 |
+
The following command is about to run:
|
| 56 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-48 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/eval/inf.log 2>&1 &
|
| 57 |
+
The following command is about to run:
|
| 58 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-48 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/realqa/inf.log 2>&1 &
|
| 59 |
+
The following command is about to run:
|
| 60 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-48 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/48/gaia/inf.log 2>&1 &
|
| 61 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 62 |
+
The following command is about to run:
|
| 63 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-32 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/eval/inf.log 2>&1 &
|
| 64 |
+
The following command is about to run:
|
| 65 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-32 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/realqa/inf.log 2>&1 &
|
| 66 |
+
The following command is about to run:
|
| 67 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-32 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/32/gaia/inf.log 2>&1 &
|
| 68 |
+
The following command is about to run:
|
| 69 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-16 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/eval/inf.log 2>&1 &
|
| 70 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 71 |
+
The following command is about to run:
|
| 72 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-16 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/realqa/inf.log 2>&1 &
|
| 73 |
+
The following command is about to run:
|
| 74 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/checkpoint-16 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30712#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_cleaned_871_359_total_1065/16/gaia/inf.log 2>&1 &
|
| 75 |
+
Wish me good luck!
|
deep_search/4-3_871_change_hotpotqa_train_871_doc_by_itself_QwQ-32B_1.log
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41, 27, 13]
|
| 3 |
+
step_list: [78, 68, 55, 41]
|
| 4 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/78/eval
|
| 7 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/78/realqa
|
| 8 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/78/gaia
|
| 9 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/checkpoint-68) to exist...
|
| 10 |
+
The checkpoint exists. Waiting for running...
|
| 11 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/68/eval
|
| 12 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/68/realqa
|
| 13 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/68/gaia
|
| 14 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/checkpoint-55) to exist...
|
| 15 |
+
The checkpoint exists. Waiting for running...
|
| 16 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/55/eval
|
| 17 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/55/realqa
|
| 18 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/55/gaia
|
| 19 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/output/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/checkpoint-41) to exist...
|
| 20 |
+
The checkpoint exists. Waiting for running...
|
| 21 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/41/eval
|
| 22 |
+
skip evaluated model: JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/41/realqa
|
| 23 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/41/gaia/inf.log 2>&1 &
|
| 24 |
+
All checkpoints exist. Wait for runing...
|
| 25 |
+
available_gpus: [2, 3]
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/output/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:27980#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_871_change_hotpotqa_train_871/41/gaia/inf.log 2>&1 &
|
| 28 |
+
Wish me good luck!
|
deep_search/4-4_871_doc_by_itself_qwen7b_inst.log
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41, 27, 13]
|
| 3 |
+
step_list: [78, 68, 55, 41]
|
| 4 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/78/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/78/eval_old_500/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-68) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/68/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/68/eval_old_500/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-55) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/55/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/55/eval_old_500/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-41) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/41/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/41/eval_old_500/inf.log 2>&1 &
|
| 16 |
+
All checkpoints exist. Wait for runing...
|
| 17 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/78/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/78/eval_old_500/inf.log 2>&1 &
|
| 20 |
+
The following command is about to run:
|
| 21 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/68/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/68/eval_old_500/inf.log 2>&1 &
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/55/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/55/eval_old_500/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /share/project/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /share/project/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/41/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/41/eval_old_500/inf.log 2>&1 &
|
| 26 |
+
Wish me good luck!
|
deep_search/ds_zero3.json
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bf16": {
|
| 3 |
+
"enabled": "auto"
|
| 4 |
+
},
|
| 5 |
+
"zero_optimization": {
|
| 6 |
+
"stage": 3,
|
| 7 |
+
"overlap_comm": true,
|
| 8 |
+
"contiguous_gradients": true,
|
| 9 |
+
"sub_group_size": 5E8,
|
| 10 |
+
"reduce_bucket_size": "auto",
|
| 11 |
+
"stage3_prefetch_bucket_size": "auto",
|
| 12 |
+
"stage3_param_persistence_threshold": "auto",
|
| 13 |
+
"stage3_max_live_parameters": 5E8,
|
| 14 |
+
"stage3_max_reuse_distance": 5E8,
|
| 15 |
+
"stage3_gather_16bit_weights_on_model_save": true
|
| 16 |
+
},
|
| 17 |
+
"gradient_accumulation_steps": "auto",
|
| 18 |
+
"gradient_clipping": "auto",
|
| 19 |
+
"steps_per_print": 2000,
|
| 20 |
+
"train_batch_size": "auto",
|
| 21 |
+
"train_micro_batch_size_per_gpu": "auto",
|
| 22 |
+
"wall_clock_breakdown": false,
|
| 23 |
+
"flops_profiler": {
|
| 24 |
+
"enabled": false,
|
| 25 |
+
"profile_step": 10,
|
| 26 |
+
"module_depth": -1,
|
| 27 |
+
"top_modules": 3,
|
| 28 |
+
"detailed": true,
|
| 29 |
+
"output_file": "flops_profiler.out"
|
| 30 |
+
}
|
| 31 |
+
}
|
deep_search/ds_zero3_offload.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bf16": {
|
| 3 |
+
"enabled": "auto"
|
| 4 |
+
},
|
| 5 |
+
"zero_optimization": {
|
| 6 |
+
"stage": 3,
|
| 7 |
+
"offload_optimizer": {
|
| 8 |
+
"device": "cpu",
|
| 9 |
+
"pin_memory": true
|
| 10 |
+
},
|
| 11 |
+
"offload_param": {
|
| 12 |
+
"device": "cpu",
|
| 13 |
+
"pin_memory": true
|
| 14 |
+
},
|
| 15 |
+
"overlap_comm": true,
|
| 16 |
+
"contiguous_gradients": true,
|
| 17 |
+
"sub_group_size": 5E8,
|
| 18 |
+
"reduce_bucket_size": "auto",
|
| 19 |
+
"stage3_prefetch_bucket_size": "auto",
|
| 20 |
+
"stage3_param_persistence_threshold": "auto",
|
| 21 |
+
"stage3_max_live_parameters": 5E8,
|
| 22 |
+
"stage3_max_reuse_distance": 5E8,
|
| 23 |
+
"stage3_gather_16bit_weights_on_model_save": true
|
| 24 |
+
},
|
| 25 |
+
"gradient_accumulation_steps": "auto",
|
| 26 |
+
"gradient_clipping": "auto",
|
| 27 |
+
"steps_per_print": 2000,
|
| 28 |
+
"train_batch_size": "auto",
|
| 29 |
+
"train_micro_batch_size_per_gpu": "auto",
|
| 30 |
+
"wall_clock_breakdown": false,
|
| 31 |
+
"flops_profiler": {
|
| 32 |
+
"enabled": false,
|
| 33 |
+
"profile_step": 10,
|
| 34 |
+
"module_depth": -1,
|
| 35 |
+
"top_modules": 3,
|
| 36 |
+
"detailed": true,
|
| 37 |
+
"output_file": "flops_profiler.out"
|
| 38 |
+
}
|
| 39 |
+
}
|
deep_search/math_eval.sh
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
cd /opt/aps/workdir/math/evaluation
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=6,7
|
| 5 |
+
|
| 6 |
+
dataset_name=AIME24
|
| 7 |
+
model_name=DeepSeek-R1-Distill-Qwen-32
|
| 8 |
+
model_path=/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32
|
| 9 |
+
max_tokens=20000
|
| 10 |
+
|
| 11 |
+
target_path=/opt/aps/workdir/math/evaluation/outputs/${model_name}/${dataset_name}
|
| 12 |
+
log_path=/opt/aps/workdir/math/evaluation/logs/${model_name}/${dataset_name}
|
| 13 |
+
mkdir -p ${target_path}
|
| 14 |
+
mkdir -p ${log_path}
|
| 15 |
+
|
| 16 |
+
nohup /opt/aps/workdir/miniforge3/envs/search_o1/bin/python -u run_eval_32b.py \
|
| 17 |
+
--data_name ${dataset_name} \
|
| 18 |
+
--target_path ${target_path} \
|
| 19 |
+
--model_name_or_path ${model_path} \
|
| 20 |
+
--prompt v4 \
|
| 21 |
+
--max_tokens ${max_tokens} \
|
| 22 |
+
--paralle_size 2 > ${log_path}/inf.log 2>&1 &
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
dataset_name=AMC23
|
| 26 |
+
nohup /opt/aps/workdir/miniforge3/envs/search_o1/bin/python -u run_eval_32b.py \
|
| 27 |
+
--data_name ${dataset_name} \
|
| 28 |
+
--target_path /opt/aps/workdir/math/evaluation/outputs/${model_name}/${dataset_name} \
|
| 29 |
+
--model_name_or_path ${model_path} \
|
| 30 |
+
--prompt v4 \
|
| 31 |
+
--max_tokens ${max_tokens} \
|
| 32 |
+
--paralle_size 2 > /opt/aps/workdir/math/evaluation/logs/${model_name}/${dataset_name}/inf.log 2>&1 &
|
deep_search/mix.sh
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# export CUDA_VISIBLE_DEVICES=0,1,2,3
|
| 6 |
+
# --include localhost:0,1,2,3,4,5,6,7 \
|
| 7 |
+
|
| 8 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 9 |
+
# --master_port=9944 \
|
| 10 |
+
# --include localhost:4,5,6,7 \
|
| 11 |
+
# sft.py \
|
| 12 |
+
# --deepspeed ds_zero3.json \
|
| 13 |
+
# --model_name_or_path /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 14 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-1.5B \
|
| 15 |
+
# --do_train \
|
| 16 |
+
# --save_safetensors true \
|
| 17 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/merged_selected_data.json \
|
| 18 |
+
# --lr_scheduler_type cosine \
|
| 19 |
+
# --output_dir output/checkpoint/qwen_32B_test \
|
| 20 |
+
# --overwrite_output_dir \
|
| 21 |
+
# --warmup_ratio 0.03 \
|
| 22 |
+
# --gradient_checkpointing true \
|
| 23 |
+
# --per_device_train_batch_size 2 \
|
| 24 |
+
# --gradient_accumulation_steps 2 \
|
| 25 |
+
# --logging_steps 1 \
|
| 26 |
+
# --learning_rate 2e-5 \
|
| 27 |
+
# --num_train_epochs 2 \
|
| 28 |
+
# --save_steps 400 \
|
| 29 |
+
# --model_max_length 8192 \
|
| 30 |
+
# --save_total_limit 16 \
|
| 31 |
+
# --bf16 || exit 1
|
| 32 |
+
|
| 33 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 34 |
+
# --master_port=9944 \
|
| 35 |
+
# sft.py \
|
| 36 |
+
# --deepspeed ds_zero3.json \
|
| 37 |
+
# --model_name_or_path /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 38 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 39 |
+
# --do_train \
|
| 40 |
+
# --save_safetensors true \
|
| 41 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 42 |
+
# --lr_scheduler_type cosine \
|
| 43 |
+
# --output_dir output/checkpoint/qwen_2_5_32b_data_1217 \
|
| 44 |
+
# --overwrite_output_dir \
|
| 45 |
+
# --warmup_ratio 0.03 \
|
| 46 |
+
# --gradient_checkpointing true \
|
| 47 |
+
# --per_device_train_batch_size 1 \
|
| 48 |
+
# --gradient_accumulation_steps 4 \
|
| 49 |
+
# --logging_steps 1 \
|
| 50 |
+
# --learning_rate 2e-5 \
|
| 51 |
+
# --num_train_epochs 1 \
|
| 52 |
+
# --save_steps 400 \
|
| 53 |
+
# --model_max_length 8192 \
|
| 54 |
+
# --save_total_limit 16 \
|
| 55 |
+
# --bf16 || exit 1
|
| 56 |
+
|
| 57 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 58 |
+
# --master_port=9944 \
|
| 59 |
+
# --include localhost:4,5,6,7 \
|
| 60 |
+
# sft.py \
|
| 61 |
+
# --deepspeed ds_zero3.json \
|
| 62 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 63 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 64 |
+
# --do_train \
|
| 65 |
+
# --save_safetensors true \
|
| 66 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 67 |
+
# --lr_scheduler_type cosine \
|
| 68 |
+
# --output_dir output/checkpoint/qwen_7b_inst_data_1217 \
|
| 69 |
+
# --overwrite_output_dir \
|
| 70 |
+
# --warmup_ratio 0.03 \
|
| 71 |
+
# --gradient_checkpointing true \
|
| 72 |
+
# --per_device_train_batch_size 1 \
|
| 73 |
+
# --gradient_accumulation_steps 4 \
|
| 74 |
+
# --logging_steps 1 \
|
| 75 |
+
# --learning_rate 2e-5 \
|
| 76 |
+
# --num_train_epochs 1 \
|
| 77 |
+
# --save_steps 400 \
|
| 78 |
+
# --model_max_length 8192 \
|
| 79 |
+
# --save_total_limit 16 \
|
| 80 |
+
# --bf16 || exit 1
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# qwen 7b 用自己的tokenizer
|
| 84 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 85 |
+
# --master_port=9944 \
|
| 86 |
+
# --include localhost:6,7 \
|
| 87 |
+
# sft.py \
|
| 88 |
+
# --deepspeed ds_zero3.json \
|
| 89 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 90 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 91 |
+
# --do_train \
|
| 92 |
+
# --save_safetensors true \
|
| 93 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 94 |
+
# --lr_scheduler_type cosine \
|
| 95 |
+
# --output_dir output/checkpoint/qwen_7b_original_tokenizer_inst_data_1217 \
|
| 96 |
+
# --overwrite_output_dir \
|
| 97 |
+
# --warmup_ratio 0.03 \
|
| 98 |
+
# --gradient_checkpointing true \
|
| 99 |
+
# --per_device_train_batch_size 1 \
|
| 100 |
+
# --gradient_accumulation_steps 4 \
|
| 101 |
+
# --logging_steps 1 \
|
| 102 |
+
# --learning_rate 2e-5 \
|
| 103 |
+
# --num_train_epochs 1 \
|
| 104 |
+
# --save_steps 400 \
|
| 105 |
+
# --model_max_length 8192 \
|
| 106 |
+
# --save_total_limit 16 \
|
| 107 |
+
# --bf16 || exit 1
|
| 108 |
+
|
| 109 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 110 |
+
# --master_port=9944 \
|
| 111 |
+
# --include localhost:7 \
|
| 112 |
+
# sft_1.py \
|
| 113 |
+
# --deepspeed ds_zero3.json \
|
| 114 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 115 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 116 |
+
# --do_train \
|
| 117 |
+
# --save_safetensors true \
|
| 118 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 119 |
+
# --lr_scheduler_type cosine \
|
| 120 |
+
# --output_dir output/checkpoint/qwen_7b_inst_data_1217 \
|
| 121 |
+
# --overwrite_output_dir \
|
| 122 |
+
# --warmup_ratio 0.03 \
|
| 123 |
+
# --gradient_checkpointing true \
|
| 124 |
+
# --per_device_train_batch_size 1 \
|
| 125 |
+
# --gradient_accumulation_steps 4 \
|
| 126 |
+
# --logging_steps 1 \
|
| 127 |
+
# --learning_rate 2e-5 \
|
| 128 |
+
# --num_train_epochs 1 \
|
| 129 |
+
# --save_steps 400 \
|
| 130 |
+
# --model_max_length 8192 \
|
| 131 |
+
# --save_total_limit 16 \
|
| 132 |
+
# --bf16 || exit 1
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
# qwen 7b 用自己的tokenizer
|
| 137 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 138 |
+
# --master_port=9944 \
|
| 139 |
+
# --include localhost:6,7 \
|
| 140 |
+
# sft.py \
|
| 141 |
+
# --deepspeed ds_zero3.json \
|
| 142 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 143 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 144 |
+
# --do_train \
|
| 145 |
+
# --save_safetensors true \
|
| 146 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 147 |
+
# --lr_scheduler_type cosine \
|
| 148 |
+
# --output_dir output/sft_use_original_tokenizer/qwen_7b_original_tokenizer_inst_data_1217_1 \
|
| 149 |
+
# --overwrite_output_dir \
|
| 150 |
+
# --warmup_ratio 0.03 \
|
| 151 |
+
# --gradient_checkpointing true \
|
| 152 |
+
# --per_device_train_batch_size 1 \
|
| 153 |
+
# --gradient_accumulation_steps 4 \
|
| 154 |
+
# --logging_steps 1 \
|
| 155 |
+
# --learning_rate 2e-5 \
|
| 156 |
+
# --num_train_epochs 1 \
|
| 157 |
+
# --model_max_length 8192 \
|
| 158 |
+
# --save_total_limit 16 \
|
| 159 |
+
# --bf16 || exit 1
|
| 160 |
+
# 定义参数
|
| 161 |
+
lr=1e-5
|
| 162 |
+
base=QwQ-32B
|
| 163 |
+
tokenizer=QwQ-32B
|
| 164 |
+
# train_data=hopotqa_1217.json
|
| 165 |
+
train_data=new_instruction_2k_sft
|
| 166 |
+
bsz=2
|
| 167 |
+
acc=4
|
| 168 |
+
|
| 169 |
+
# 生成随机 JOB-ID
|
| 170 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 171 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 172 |
+
|
| 173 |
+
# 输出路径
|
| 174 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 175 |
+
|
| 176 |
+
output_dir_1=${output_dir}
|
| 177 |
+
model_name_1=${base}
|
| 178 |
+
# 创建输出目录
|
| 179 |
+
mkdir -p "$output_dir"
|
| 180 |
+
|
| 181 |
+
echo ${output_dir}
|
| 182 |
+
|
| 183 |
+
# 执行 deepspeed 命令
|
| 184 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 185 |
+
--master_port=9944 \
|
| 186 |
+
sft_1.py \
|
| 187 |
+
--deepspeed ds_zero3_offload.json \
|
| 188 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 189 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 190 |
+
--do_train \
|
| 191 |
+
--save_safetensors true \
|
| 192 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 193 |
+
--lr_scheduler_type cosine \
|
| 194 |
+
--output_dir "$output_dir" \
|
| 195 |
+
--overwrite_output_dir \
|
| 196 |
+
--warmup_ratio 0.03 \
|
| 197 |
+
--gradient_checkpointing true \
|
| 198 |
+
--per_device_train_batch_size "$bsz" \
|
| 199 |
+
--gradient_accumulation_steps "$acc" \
|
| 200 |
+
--logging_steps 1 \
|
| 201 |
+
--learning_rate "$lr" \
|
| 202 |
+
--num_train_epochs 6 \
|
| 203 |
+
--save_strategy epoch \
|
| 204 |
+
--save_only_model true \
|
| 205 |
+
--model_max_length 30000 \
|
| 206 |
+
--save_total_limit 15 \
|
| 207 |
+
--bf16 || exit 1
|
| 208 |
+
|
| 209 |
+
# 3-15 model_max_length 25000 -> 30000
|
| 210 |
+
#################################################
|
| 211 |
+
lr=1e-5
|
| 212 |
+
base=DeepSeek-R1-Distill-Qwen-32B
|
| 213 |
+
tokenizer=DeepSeek-R1-Distill-Qwen-32B
|
| 214 |
+
# train_data=hopotqa_1217.json
|
| 215 |
+
train_data=new_instruction_2k_sft
|
| 216 |
+
bsz=2
|
| 217 |
+
acc=4
|
| 218 |
+
|
| 219 |
+
# 生成随机 JOB-ID
|
| 220 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 221 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 222 |
+
|
| 223 |
+
# 输出路径
|
| 224 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 225 |
+
|
| 226 |
+
output_dir_2=${output_dir}
|
| 227 |
+
model_name_2=${base}
|
| 228 |
+
|
| 229 |
+
# 创建输出目录
|
| 230 |
+
mkdir -p "$output_dir"
|
| 231 |
+
|
| 232 |
+
echo ${output_dir}
|
| 233 |
+
|
| 234 |
+
# 执行 deepspeed 命令
|
| 235 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 236 |
+
--master_port=9944 \
|
| 237 |
+
sft_1.py \
|
| 238 |
+
--deepspeed ds_zero3_offload.json \
|
| 239 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 240 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 241 |
+
--do_train \
|
| 242 |
+
--save_safetensors true \
|
| 243 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 244 |
+
--lr_scheduler_type cosine \
|
| 245 |
+
--output_dir "$output_dir" \
|
| 246 |
+
--overwrite_output_dir \
|
| 247 |
+
--warmup_ratio 0.03 \
|
| 248 |
+
--gradient_checkpointing true \
|
| 249 |
+
--per_device_train_batch_size "$bsz" \
|
| 250 |
+
--gradient_accumulation_steps "$acc" \
|
| 251 |
+
--logging_steps 1 \
|
| 252 |
+
--learning_rate "$lr" \
|
| 253 |
+
--num_train_epochs 6 \
|
| 254 |
+
--save_strategy epoch \
|
| 255 |
+
--save_only_model true \
|
| 256 |
+
--model_max_length 30000 \
|
| 257 |
+
--save_total_limit 15 \
|
| 258 |
+
--bf16 || exit 1
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
bash test.sh $output_dir_1 $model_name_1
|
| 262 |
+
|
| 263 |
+
bash test.sh $output_dir_2 $model_name_2
|
| 264 |
+
|
| 265 |
+
|
deep_search/mix_1.sh
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# export CUDA_VISIBLE_DEVICES=0,1,2,3
|
| 6 |
+
# --include localhost:0,1,2,3,4,5,6,7 \
|
| 7 |
+
|
| 8 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 9 |
+
# --master_port=9944 \
|
| 10 |
+
# --include localhost:4,5,6,7 \
|
| 11 |
+
# sft.py \
|
| 12 |
+
# --deepspeed ds_zero3.json \
|
| 13 |
+
# --model_name_or_path /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 14 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-1.5B \
|
| 15 |
+
# --do_train \
|
| 16 |
+
# --save_safetensors true \
|
| 17 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/merged_selected_data.json \
|
| 18 |
+
# --lr_scheduler_type cosine \
|
| 19 |
+
# --output_dir output/checkpoint/qwen_32B_test \
|
| 20 |
+
# --overwrite_output_dir \
|
| 21 |
+
# --warmup_ratio 0.03 \
|
| 22 |
+
# --gradient_checkpointing true \
|
| 23 |
+
# --per_device_train_batch_size 2 \
|
| 24 |
+
# --gradient_accumulation_steps 2 \
|
| 25 |
+
# --logging_steps 1 \
|
| 26 |
+
# --learning_rate 2e-5 \
|
| 27 |
+
# --num_train_epochs 2 \
|
| 28 |
+
# --save_steps 400 \
|
| 29 |
+
# --model_max_length 8192 \
|
| 30 |
+
# --save_total_limit 16 \
|
| 31 |
+
# --bf16 || exit 1
|
| 32 |
+
|
| 33 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 34 |
+
# --master_port=9944 \
|
| 35 |
+
# sft.py \
|
| 36 |
+
# --deepspeed ds_zero3.json \
|
| 37 |
+
# --model_name_or_path /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 38 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 39 |
+
# --do_train \
|
| 40 |
+
# --save_safetensors true \
|
| 41 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 42 |
+
# --lr_scheduler_type cosine \
|
| 43 |
+
# --output_dir output/checkpoint/qwen_2_5_32b_data_1217 \
|
| 44 |
+
# --overwrite_output_dir \
|
| 45 |
+
# --warmup_ratio 0.03 \
|
| 46 |
+
# --gradient_checkpointing true \
|
| 47 |
+
# --per_device_train_batch_size 1 \
|
| 48 |
+
# --gradient_accumulation_steps 4 \
|
| 49 |
+
# --logging_steps 1 \
|
| 50 |
+
# --learning_rate 2e-5 \
|
| 51 |
+
# --num_train_epochs 1 \
|
| 52 |
+
# --save_steps 400 \
|
| 53 |
+
# --model_max_length 8192 \
|
| 54 |
+
# --save_total_limit 16 \
|
| 55 |
+
# --bf16 || exit 1
|
| 56 |
+
|
| 57 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 58 |
+
# --master_port=9944 \
|
| 59 |
+
# --include localhost:4,5,6,7 \
|
| 60 |
+
# sft.py \
|
| 61 |
+
# --deepspeed ds_zero3.json \
|
| 62 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 63 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 64 |
+
# --do_train \
|
| 65 |
+
# --save_safetensors true \
|
| 66 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 67 |
+
# --lr_scheduler_type cosine \
|
| 68 |
+
# --output_dir output/checkpoint/qwen_7b_inst_data_1217 \
|
| 69 |
+
# --overwrite_output_dir \
|
| 70 |
+
# --warmup_ratio 0.03 \
|
| 71 |
+
# --gradient_checkpointing true \
|
| 72 |
+
# --per_device_train_batch_size 1 \
|
| 73 |
+
# --gradient_accumulation_steps 4 \
|
| 74 |
+
# --logging_steps 1 \
|
| 75 |
+
# --learning_rate 2e-5 \
|
| 76 |
+
# --num_train_epochs 1 \
|
| 77 |
+
# --save_steps 400 \
|
| 78 |
+
# --model_max_length 8192 \
|
| 79 |
+
# --save_total_limit 16 \
|
| 80 |
+
# --bf16 || exit 1
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# qwen 7b 用自己的tokenizer
|
| 84 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 85 |
+
# --master_port=9944 \
|
| 86 |
+
# --include localhost:6,7 \
|
| 87 |
+
# sft.py \
|
| 88 |
+
# --deepspeed ds_zero3.json \
|
| 89 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 90 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 91 |
+
# --do_train \
|
| 92 |
+
# --save_safetensors true \
|
| 93 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 94 |
+
# --lr_scheduler_type cosine \
|
| 95 |
+
# --output_dir output/checkpoint/qwen_7b_original_tokenizer_inst_data_1217 \
|
| 96 |
+
# --overwrite_output_dir \
|
| 97 |
+
# --warmup_ratio 0.03 \
|
| 98 |
+
# --gradient_checkpointing true \
|
| 99 |
+
# --per_device_train_batch_size 1 \
|
| 100 |
+
# --gradient_accumulation_steps 4 \
|
| 101 |
+
# --logging_steps 1 \
|
| 102 |
+
# --learning_rate 2e-5 \
|
| 103 |
+
# --num_train_epochs 1 \
|
| 104 |
+
# --save_steps 400 \
|
| 105 |
+
# --model_max_length 8192 \
|
| 106 |
+
# --save_total_limit 16 \
|
| 107 |
+
# --bf16 || exit 1
|
| 108 |
+
|
| 109 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 110 |
+
# --master_port=9944 \
|
| 111 |
+
# --include localhost:7 \
|
| 112 |
+
# sft_1.py \
|
| 113 |
+
# --deepspeed ds_zero3.json \
|
| 114 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 115 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 116 |
+
# --do_train \
|
| 117 |
+
# --save_safetensors true \
|
| 118 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 119 |
+
# --lr_scheduler_type cosine \
|
| 120 |
+
# --output_dir output/checkpoint/qwen_7b_inst_data_1217 \
|
| 121 |
+
# --overwrite_output_dir \
|
| 122 |
+
# --warmup_ratio 0.03 \
|
| 123 |
+
# --gradient_checkpointing true \
|
| 124 |
+
# --per_device_train_batch_size 1 \
|
| 125 |
+
# --gradient_accumulation_steps 4 \
|
| 126 |
+
# --logging_steps 1 \
|
| 127 |
+
# --learning_rate 2e-5 \
|
| 128 |
+
# --num_train_epochs 1 \
|
| 129 |
+
# --save_steps 400 \
|
| 130 |
+
# --model_max_length 8192 \
|
| 131 |
+
# --save_total_limit 16 \
|
| 132 |
+
# --bf16 || exit 1
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
# qwen 7b 用自己的tokenizer
|
| 137 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 138 |
+
# --master_port=9944 \
|
| 139 |
+
# --include localhost:6,7 \
|
| 140 |
+
# sft.py \
|
| 141 |
+
# --deepspeed ds_zero3.json \
|
| 142 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 143 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 144 |
+
# --do_train \
|
| 145 |
+
# --save_safetensors true \
|
| 146 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 147 |
+
# --lr_scheduler_type cosine \
|
| 148 |
+
# --output_dir output/sft_use_original_tokenizer/qwen_7b_original_tokenizer_inst_data_1217_1 \
|
| 149 |
+
# --overwrite_output_dir \
|
| 150 |
+
# --warmup_ratio 0.03 \
|
| 151 |
+
# --gradient_checkpointing true \
|
| 152 |
+
# --per_device_train_batch_size 1 \
|
| 153 |
+
# --gradient_accumulation_steps 4 \
|
| 154 |
+
# --logging_steps 1 \
|
| 155 |
+
# --learning_rate 2e-5 \
|
| 156 |
+
# --num_train_epochs 1 \
|
| 157 |
+
# --model_max_length 8192 \
|
| 158 |
+
# --save_total_limit 16 \
|
| 159 |
+
# --bf16 || exit 1
|
| 160 |
+
# 定义参数
|
| 161 |
+
lr=1e-5
|
| 162 |
+
base=QwQ-32B
|
| 163 |
+
tokenizer=QwQ-32B
|
| 164 |
+
# train_data=hopotqa_1217.json
|
| 165 |
+
train_data=strict_selected_1526_sft
|
| 166 |
+
bsz=2
|
| 167 |
+
acc=4
|
| 168 |
+
|
| 169 |
+
# 生成随机 JOB-ID
|
| 170 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 171 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 172 |
+
|
| 173 |
+
# 输出路径
|
| 174 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 175 |
+
|
| 176 |
+
output_dir_1=${output_dir}
|
| 177 |
+
model_name_1=${base}
|
| 178 |
+
# 创建输出目录
|
| 179 |
+
mkdir -p "$output_dir"
|
| 180 |
+
|
| 181 |
+
echo ${output_dir}
|
| 182 |
+
|
| 183 |
+
# 执行 deepspeed 命令
|
| 184 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 185 |
+
--master_port=9944 \
|
| 186 |
+
sft_1.py \
|
| 187 |
+
--deepspeed ds_zero3_offload.json \
|
| 188 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 189 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 190 |
+
--do_train \
|
| 191 |
+
--save_safetensors true \
|
| 192 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 193 |
+
--lr_scheduler_type cosine \
|
| 194 |
+
--output_dir "$output_dir" \
|
| 195 |
+
--overwrite_output_dir \
|
| 196 |
+
--warmup_ratio 0.03 \
|
| 197 |
+
--gradient_checkpointing true \
|
| 198 |
+
--per_device_train_batch_size "$bsz" \
|
| 199 |
+
--gradient_accumulation_steps "$acc" \
|
| 200 |
+
--logging_steps 1 \
|
| 201 |
+
--learning_rate "$lr" \
|
| 202 |
+
--num_train_epochs 6 \
|
| 203 |
+
--save_strategy epoch \
|
| 204 |
+
--save_only_model true \
|
| 205 |
+
--model_max_length 30000 \
|
| 206 |
+
--save_total_limit 15 \
|
| 207 |
+
--bf16 || exit 1
|
| 208 |
+
|
| 209 |
+
# 3-15 model_max_length 25000 -> 30000
|
| 210 |
+
#################################################
|
| 211 |
+
lr=1e-5
|
| 212 |
+
base=Qwen2.5-32B-Instruct
|
| 213 |
+
tokenizer=Qwen2.5-32B-Instruct
|
| 214 |
+
# train_data=hopotqa_1217.json
|
| 215 |
+
train_data=strict_selected_1526_sft
|
| 216 |
+
bsz=2
|
| 217 |
+
acc=4
|
| 218 |
+
|
| 219 |
+
# 生成随机 JOB-ID
|
| 220 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 221 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 222 |
+
|
| 223 |
+
# 输出路径
|
| 224 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 225 |
+
|
| 226 |
+
output_dir_2=${output_dir}
|
| 227 |
+
model_name_2=${base}
|
| 228 |
+
# 创建输出目录
|
| 229 |
+
mkdir -p "$output_dir"
|
| 230 |
+
|
| 231 |
+
echo ${output_dir}
|
| 232 |
+
|
| 233 |
+
# 执行 deepspeed 命令
|
| 234 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 235 |
+
--master_port=9944 \
|
| 236 |
+
sft_1.py \
|
| 237 |
+
--deepspeed ds_zero3_offload.json \
|
| 238 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 239 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 240 |
+
--do_train \
|
| 241 |
+
--save_safetensors true \
|
| 242 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 243 |
+
--lr_scheduler_type cosine \
|
| 244 |
+
--output_dir "$output_dir" \
|
| 245 |
+
--overwrite_output_dir \
|
| 246 |
+
--warmup_ratio 0.03 \
|
| 247 |
+
--gradient_checkpointing true \
|
| 248 |
+
--per_device_train_batch_size "$bsz" \
|
| 249 |
+
--gradient_accumulation_steps "$acc" \
|
| 250 |
+
--logging_steps 1 \
|
| 251 |
+
--learning_rate "$lr" \
|
| 252 |
+
--num_train_epochs 6 \
|
| 253 |
+
--save_strategy epoch \
|
| 254 |
+
--save_only_model true \
|
| 255 |
+
--model_max_length 30000 \
|
| 256 |
+
--save_total_limit 15 \
|
| 257 |
+
--bf16 || exit 1
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
########################
|
| 262 |
+
lr=1e-5
|
| 263 |
+
base=Qwen2.5-7B-Instruct
|
| 264 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 265 |
+
# train_data=hopotqa_1217.json
|
| 266 |
+
train_data=strict_selected_1526_sft
|
| 267 |
+
bsz=2
|
| 268 |
+
acc=4
|
| 269 |
+
|
| 270 |
+
# 生成随机 JOB-ID
|
| 271 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 272 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 273 |
+
|
| 274 |
+
# 输出路径
|
| 275 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 276 |
+
|
| 277 |
+
output_dir_3=${output_dir}
|
| 278 |
+
model_name_3=${base}
|
| 279 |
+
# 创建输出目录
|
| 280 |
+
mkdir -p "$output_dir"
|
| 281 |
+
|
| 282 |
+
echo ${output_dir}
|
| 283 |
+
|
| 284 |
+
# 执行 deepspeed 命令
|
| 285 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 286 |
+
--master_port=9944 \
|
| 287 |
+
sft_1.py \
|
| 288 |
+
--deepspeed ds_zero3_offload.json \
|
| 289 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 290 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 291 |
+
--do_train \
|
| 292 |
+
--save_safetensors true \
|
| 293 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 294 |
+
--lr_scheduler_type cosine \
|
| 295 |
+
--output_dir "$output_dir" \
|
| 296 |
+
--overwrite_output_dir \
|
| 297 |
+
--warmup_ratio 0.03 \
|
| 298 |
+
--gradient_checkpointing true \
|
| 299 |
+
--per_device_train_batch_size "$bsz" \
|
| 300 |
+
--gradient_accumulation_steps "$acc" \
|
| 301 |
+
--logging_steps 1 \
|
| 302 |
+
--learning_rate "$lr" \
|
| 303 |
+
--num_train_epochs 6 \
|
| 304 |
+
--save_strategy epoch \
|
| 305 |
+
--save_only_model true \
|
| 306 |
+
--model_max_length 30000 \
|
| 307 |
+
--save_total_limit 15 \
|
| 308 |
+
--bf16 || exit 1
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
bash test.sh $output_dir_1 $model_name_1
|
| 312 |
+
bash test.sh $output_dir_2 $model_name_2
|
| 313 |
+
bash test.sh $output_dir_3 $model_name_3
|
| 314 |
+
|
| 315 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 316 |
+
|
| 317 |
+
|
deep_search/mix_re.sh
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
|
| 6 |
+
###########################
|
| 7 |
+
# 定义参数
|
| 8 |
+
lr=1e-5
|
| 9 |
+
base=Qwen2.5-7B-Instruct
|
| 10 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 11 |
+
# train_data=hopotqa_1217.json
|
| 12 |
+
train_data=merged_syn_long_359_sft_1533
|
| 13 |
+
bsz=2
|
| 14 |
+
acc=4
|
| 15 |
+
|
| 16 |
+
# 生成随机 JOB-ID
|
| 17 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 18 |
+
save_path="JOB:2333#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 19 |
+
|
| 20 |
+
# 输出路径
|
| 21 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 22 |
+
|
| 23 |
+
output_dir_1=${output_dir}
|
| 24 |
+
model_name_1=${base}
|
| 25 |
+
dataset_1=${train_data}
|
| 26 |
+
# 创建输出目录
|
| 27 |
+
mkdir -p "$output_dir"
|
| 28 |
+
|
| 29 |
+
echo ${output_dir}
|
| 30 |
+
|
| 31 |
+
# 执行 deepspeed 命令
|
| 32 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 33 |
+
--master_port=9944 \
|
| 34 |
+
sft_1.py \
|
| 35 |
+
--deepspeed ds_zero3_offload.json \
|
| 36 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 37 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 38 |
+
--do_train \
|
| 39 |
+
--save_safetensors true \
|
| 40 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 41 |
+
--lr_scheduler_type cosine \
|
| 42 |
+
--output_dir "$output_dir" \
|
| 43 |
+
--overwrite_output_dir \
|
| 44 |
+
--warmup_ratio 0.03 \
|
| 45 |
+
--gradient_checkpointing true \
|
| 46 |
+
--per_device_train_batch_size "$bsz" \
|
| 47 |
+
--gradient_accumulation_steps "$acc" \
|
| 48 |
+
--logging_steps 1 \
|
| 49 |
+
--learning_rate "$lr" \
|
| 50 |
+
--num_train_epochs 10000 \
|
| 51 |
+
--save_strategy epoch \
|
| 52 |
+
--save_only_model true \
|
| 53 |
+
--model_max_length 30000 \
|
| 54 |
+
--save_total_limit 1 \
|
| 55 |
+
--bf16 || exit 1
|
| 56 |
+
|
| 57 |
+
# 3-15 model_max_length 25000 -> 30000
|
| 58 |
+
#################################################
|
| 59 |
+
# lr=1e-5
|
| 60 |
+
# base=QwQ-32B
|
| 61 |
+
# tokenizer=QwQ-32B
|
| 62 |
+
# # train_data=hopotqa_1217.json
|
| 63 |
+
# train_data=merged_syn_short_398_sft_1572
|
| 64 |
+
# bsz=2
|
| 65 |
+
# acc=4
|
| 66 |
+
|
| 67 |
+
# # 生成随机 JOB-ID
|
| 68 |
+
# JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 69 |
+
# save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 70 |
+
|
| 71 |
+
# # 输出路径
|
| 72 |
+
# output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 73 |
+
|
| 74 |
+
# output_dir_2=${output_dir}
|
| 75 |
+
# model_name_2=${base}
|
| 76 |
+
# dataset_2=${train_data}
|
| 77 |
+
# # 创建输出目录
|
| 78 |
+
# mkdir -p "$output_dir"
|
| 79 |
+
|
| 80 |
+
# echo ${output_dir}
|
| 81 |
+
|
| 82 |
+
# # 执行 deepspeed 命令
|
| 83 |
+
# /share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 84 |
+
# --master_port=9944 \
|
| 85 |
+
# sft_1.py \
|
| 86 |
+
# --deepspeed ds_zero3_offload.json \
|
| 87 |
+
# --model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 88 |
+
# --tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 89 |
+
# --do_train \
|
| 90 |
+
# --save_safetensors true \
|
| 91 |
+
# --data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 92 |
+
# --lr_scheduler_type cosine \
|
| 93 |
+
# --output_dir "$output_dir" \
|
| 94 |
+
# --overwrite_output_dir \
|
| 95 |
+
# --warmup_ratio 0.03 \
|
| 96 |
+
# --gradient_checkpointing true \
|
| 97 |
+
# --per_device_train_batch_size "$bsz" \
|
| 98 |
+
# --gradient_accumulation_steps "$acc" \
|
| 99 |
+
# --logging_steps 1 \
|
| 100 |
+
# --learning_rate "$lr" \
|
| 101 |
+
# --num_train_epochs 6 \
|
| 102 |
+
# --save_strategy epoch \
|
| 103 |
+
# --save_only_model true \
|
| 104 |
+
# --model_max_length 30000 \
|
| 105 |
+
# --save_total_limit 15 \
|
| 106 |
+
# --bf16 || exit 1
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# ###################################
|
| 111 |
+
# lr=1e-5
|
| 112 |
+
# base=QwQ-32B
|
| 113 |
+
# tokenizer=QwQ-32B
|
| 114 |
+
# # train_data=hopotqa_1217.json
|
| 115 |
+
# train_data=strict_selected_1526_sft_format_ans
|
| 116 |
+
# bsz=2
|
| 117 |
+
# acc=4
|
| 118 |
+
|
| 119 |
+
# # 生成随机 JOB-ID
|
| 120 |
+
# JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 121 |
+
# save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 122 |
+
|
| 123 |
+
# # 输出路径
|
| 124 |
+
# output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 125 |
+
|
| 126 |
+
# output_dir_3=${output_dir}
|
| 127 |
+
# model_name_3=${base}
|
| 128 |
+
# dataset_3=${train_data}
|
| 129 |
+
# # 创建输出目录
|
| 130 |
+
# mkdir -p "$output_dir"
|
| 131 |
+
|
| 132 |
+
# echo ${output_dir}
|
| 133 |
+
|
| 134 |
+
# # 执行 deepspeed 命令
|
| 135 |
+
# /share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 136 |
+
# --master_port=9944 \
|
| 137 |
+
# sft_1.py \
|
| 138 |
+
# --deepspeed ds_zero3_offload.json \
|
| 139 |
+
# --model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 140 |
+
# --tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 141 |
+
# --do_train \
|
| 142 |
+
# --save_safetensors true \
|
| 143 |
+
# --data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 144 |
+
# --lr_scheduler_type cosine \
|
| 145 |
+
# --output_dir "$output_dir" \
|
| 146 |
+
# --overwrite_output_dir \
|
| 147 |
+
# --warmup_ratio 0.03 \
|
| 148 |
+
# --gradient_checkpointing true \
|
| 149 |
+
# --per_device_train_batch_size "$bsz" \
|
| 150 |
+
# --gradient_accumulation_steps "$acc" \
|
| 151 |
+
# --logging_steps 1 \
|
| 152 |
+
# --learning_rate "$lr" \
|
| 153 |
+
# --num_train_epochs 6 \
|
| 154 |
+
# --save_strategy epoch \
|
| 155 |
+
# --save_only_model true \
|
| 156 |
+
# --model_max_length 30000 \
|
| 157 |
+
# --save_total_limit 15 \
|
| 158 |
+
# --bf16 || exit 1
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
###################################
|
| 163 |
+
# 测试
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
# bash test.sh $output_dir_1 $model_name_1 $dataset_1
|
| 167 |
+
|
| 168 |
+
# sleep 7200
|
| 169 |
+
# bash test.sh $output_dir_2 $model_name_2 $dataset_2
|
| 170 |
+
|
| 171 |
+
# sleep 600
|
| 172 |
+
# bash test.sh $output_dir_3 $model_name_3 $dataset_3
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
|
deep_search/run.sh
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
export HOME=/opt/aps/workdir/home
|
| 2 |
+
export http_proxy=http://127.0.0.1:7880
|
| 3 |
+
export https_proxy=http://127.0.0.1:7880
|
| 4 |
+
|
| 5 |
+
./miniforge3/bin/conda init bash
|
| 6 |
+
source ~/.bashrc
|
| 7 |
+
eval "$(conda shell.bash hook)"
|
| 8 |
+
conda activate llm
|
| 9 |
+
bash mix.sh
|
| 10 |
+
# set -x
|
| 11 |
+
|
| 12 |
+
# bash Miniforge3.sh -b -u -p ./miniforge3
|
| 13 |
+
# ./miniforge3/bin/conda init bash
|
| 14 |
+
# source ~/.bashrc
|
| 15 |
+
|
| 16 |
+
# # export https_proxy='http://agent.baidu.com:8891'
|
| 17 |
+
# # export http_proxy='http://agent.baidu.com:8891'
|
| 18 |
+
|
| 19 |
+
# eval "$(conda shell.bash hook)"
|
| 20 |
+
# conda create -n llm python=3.11 -y
|
| 21 |
+
# conda activate llm
|
| 22 |
+
|
| 23 |
+
# pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
|
| 24 |
+
|
| 25 |
+
cd /opt/aps/workdir/sunshuang/search_o1/ && http_proxy=http://127.0.0.1:7880 https_proxy=http://127.0.0.1:7880 CUDA_VISIBLE_DEVICES=2,3 /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/reason_two_model_2.py --dataset_name hotpotqa --cache_dir_base /opt/aps/workdir/sunshuang/search_o1/cache_reason_two_model/eval_reason_two_model/qwen-instruct-32B/JOB9986:LR1e-5:BASEQwen2.5-32B-Instruct:TOKENDeepSeek-R1-Distill-Qwen-32:BSZ1:ACC8/38/hotpotqa --output_dir_base /opt/aps/workdir/sunshuang/search_o1/outputs_reason_two_model/eval_reason_two_model/qwen-instruct-32B/JOB9986:LR1e-5:BASEQwen2.5-32B-Instruct:TOKENDeepSeek-R1-Distill-Qwen-32:BSZ1:ACC8/38/hotpotqa --split test --max_search_limit 5 --max_turn 10 --top_k 5 --max_doc_len 3000 --subset_num 100 --model_path /opt/aps/workdir/output/JOB9986:LR1e-5:BASEQwen2.5-32B-Instruct:TOKENDeepSeek-R1-Distill-Qwen-32:BSZ1:ACC8/checkpoint-38 --model_doc_reason_path "/capacity/userdata/models/Qwen2.5-32B-Instruct" --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" --openai_api_base "http://localhost:8001/v1"
|
deep_search/search_o1/scripts/evaluate_for_rag_rl.py
ADDED
|
@@ -0,0 +1,629 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import json
|
| 3 |
+
import numpy as np
|
| 4 |
+
from collections import Counter
|
| 5 |
+
import string
|
| 6 |
+
import os, time
|
| 7 |
+
from collections import defaultdict
|
| 8 |
+
from lcb_runner.evaluation import codegen_metrics
|
| 9 |
+
from utils.math_equivalence import is_equiv
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def extract_answer(output, mode='gen'):
    """Pull the final answer out of a raw model generation.

    mode='codegen'  -> contents of the last ```python ...``` fenced block
    mode='infogen'  -> text following the "**Final Information**" or
                       "**Modified Reasoning Steps**" marker (the whole
                       output when neither marker is present)
    other modes     -> last <answer>...</answer> span; for 'choose'/'qa'
                       a nested \\text{...} payload is unwrapped and outer
                       parentheses are stripped.
    Returns '' when nothing could be extracted.
    """
    if mode == 'codegen':
        fenced = re.findall(r'```python\s*(.*?)\s*```', output, re.DOTALL | re.IGNORECASE)
        # Keep only the last fenced block — later code supersedes earlier drafts.
        return fenced[-1].strip() if fenced else ''

    if mode == 'infogen':
        marker_info = "**Final Information**"
        marker_step = "**Modified Reasoning Steps**"
        if marker_info in output:
            tail = output.split(marker_info)[-1]
            # Newlines are squeezed out only for the Final-Information variant.
            return tail.replace("\n", "").strip("```").strip()
        if marker_step in output:
            return output.split(marker_step)[-1].strip("```").strip()
        # No marker at all: fall back to the full output.
        return output

    # 'gen', 'choose', 'qa': take the last <answer>...</answer> span.
    spans = re.findall(r'<answer>(.*?)</answer>', output, re.DOTALL)
    answer = spans[-1] if spans else ''
    if mode in ['choose', 'qa']:
        # Unwrap a LaTeX \text{...} payload if present, then drop parentheses.
        wrapped = re.findall(r'\\text\{(.*)\}', answer)
        if wrapped:
            answer = wrapped[-1]
        answer = answer.strip("()")
    return answer
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def normalize_answer(text):
    """Lowercase *text* and collapse every run of whitespace to one space."""
    return " ".join(text.lower().strip().split())
|
| 64 |
+
|
| 65 |
+
def normalize_answer_qa(s):
    """Normalize a QA answer string for comparison.

    Pipeline: lowercase -> strip ASCII punctuation -> drop the articles
    a/an/the -> collapse whitespace. Mirrors the standard SQuAD/HotpotQA
    answer normalization.
    """
    lowered = s.lower()
    no_punct = lowered.translate(str.maketrans("", "", string.punctuation))
    no_articles = re.sub(r"\b(a|an|the)\b", " ", no_punct)
    return " ".join(no_articles.strip().split())
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def evaluate_predictions(output, labeled_answer, mode='gen'):
    """Score one model output against the labeled answer.

    Args:
        output: raw model generation; the answer is extracted via
            ``extract_answer(output, mode)``.
        labeled_answer: for ``mode='qa'`` an iterable of gold answers
            (best score over all of them is kept); otherwise a single
            gold answer string.
        mode: extraction/normalization mode ('gen', 'choose', 'qa', ...).

    Returns:
        (final_metric, pred_answer) where final_metric contains:
          is_valid_answer - a non-empty answer was extracted
          acc  - ground truth is a substring of the prediction
          em   - exact match after normalization
          f1   - token-level F1
          math_equal - is_equiv() on normalized strings (non-qa modes only;
                       stays 0 for 'qa')
    """
    final_metric = {"is_valid_answer": False, "acc": 0, "em": 0, "f1": 0, 'math_equal': 0}

    pred_answer = extract_answer(output, mode=mode)
    if pred_answer != '':
        final_metric["is_valid_answer"] = True

    if mode == 'qa':
        normalized_pred_answer = normalize_answer_qa(pred_answer)
        for answer in labeled_answer:
            normalized_ground_truth = normalize_answer_qa(answer)
            em = int(normalized_pred_answer == normalized_ground_truth)
            acc = int(normalized_ground_truth in normalized_pred_answer)

            # Token-level F1: Counter intersection counts shared tokens
            # (with multiplicity).
            prediction_tokens = normalized_pred_answer.split()
            ground_truth_tokens = normalized_ground_truth.split()
            common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
            num_same = sum(common.values())
            if num_same == 0:
                # NOTE: preserves the original behavior — when no token
                # overlaps, em/acc for this gold answer are skipped too.
                continue
            precision = 1.0 * num_same / len(prediction_tokens)
            recall = 1.0 * num_same / len(ground_truth_tokens)
            f1 = (2 * precision * recall) / (precision + recall)
            # Keep the best score across all gold answers. (Replaces the
            # previous `eval(k)` lookup, which was fragile and unsafe.)
            for key, value in (("em", em), ("acc", acc), ("f1", f1)):
                final_metric[key] = max(value, final_metric[key])

    else:
        normalized_pred_answer = normalize_answer(pred_answer)
        normalized_ground_truth = normalize_answer(labeled_answer)

        em = int(normalized_pred_answer == normalized_ground_truth)
        acc = int(normalized_ground_truth in normalized_pred_answer)

        prediction_tokens = normalized_pred_answer.split()
        ground_truth_tokens = normalized_ground_truth.split()
        common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
        num_same = sum(common.values())
        if num_same == 0:
            f1 = 0
        else:
            precision = 1.0 * num_same / len(prediction_tokens) if len(prediction_tokens) > 0 else 0
            recall = 1.0 * num_same / len(ground_truth_tokens) if len(ground_truth_tokens) > 0 else 0
            if (precision + recall) == 0:
                f1 = 0
            else:
                f1 = (2 * precision * recall) / (precision + recall)

        final_metric["em"] = em
        final_metric["acc"] = acc
        final_metric["f1"] = f1

        final_metric["math_equal"] = is_equiv(normalized_pred_answer, normalized_ground_truth)

    return final_metric, pred_answer
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split, apply_backoff=False):
    """Evaluate a batch of model outputs and write results + metrics to disk.

    Args:
        filtered_data: list of per-example dicts; mutated in place with
            'Output', 'Pred_Answer', 'Question', 'Metrics', ... fields.
        input_list: prompts, aligned with filtered_data.
        output_list: raw generations — either plain strings or objects with
            ``.outputs[0].text`` (vLLM-style results; presumably RequestOutput
            — TODO confirm).
        dataset_name: selects the evaluation mode ('livecode' uses
            codegen_metrics; everything else uses evaluate_predictions).
        output_dir: directory where the result/metrics JSON files are written.
        total_time: wall time for the whole batch, used for query latency.
        split: dataset split name, embedded in the output file names.
        apply_backoff: when True, output file names are derived from
            output_dir instead of the split/timestamp scheme.
    """
    if dataset_name == 'livecode':
        # Prepare samples and generations for codegen_metrics
        samples_list = []
        generations_list = []

        # Collect difficulty levels for per-domain metrics
        difficulties = []
        per_difficulty_count = {}
        num_valid_answer = 0

        for item, input_prompt, result in zip(filtered_data, input_list, output_list):
            # Accept either a raw string or a vLLM-style result object.
            if type(result) == str:
                item['Output'] = result
            else:
                item['Output'] = result.outputs[0].text
            difficulty = item.get("difficulty", "Unknown")
            difficulties.append(difficulty)
            # Track metrics per domain
            if difficulty not in per_difficulty_count.keys():
                per_difficulty_count[difficulty] = 0

            pred_code = extract_answer(item['Output'], mode='codegen')
            if pred_code != '':
                num_valid_answer += 1
                per_difficulty_count[difficulty] += 1
            # Assuming each item has 'input_output' with 'inputs' and 'outputs'
            # NOTE(review): the default "{}" decodes to a dict, which would be
            # iterated by keys below — presumably 'public_test_cases' is always
            # present as a JSON list; verify against the dataset loader.
            public_test_cases = json.loads(item.get("public_test_cases", "{}"))

            inputs, outputs = [], []
            for case in public_test_cases:
                inputs.append(case["input"])
                outputs.append(case["output"])

            # codegen_metrics expects the test cases re-serialized as JSON.
            sample = {
                "input_output": json.dumps({
                    "inputs": inputs,
                    "outputs": outputs
                }),
            }

            samples_list.append(sample)
            generations_list.append([pred_code])
            item['Pred_Answer'] = pred_code
            item['Question'] = input_prompt

        # Call codegen_metrics with pass@1
        metrics, results, final_metadata = codegen_metrics(
            samples_list,
            generations_list,
            k_list=[1],  # Evaluate the top 1 generated result
            num_process_evaluate=2,  # Parallel evaluation
            timeout=10,  # Set timeout to 10 seconds
            debug=False,
        )

        # Extract pass@1
        pass_at_1 = metrics.get('pass@1', 0.0)
        detail_pass_at_1 = metrics['detail']['pass@1']

        # Attach per-item results back onto the data records.
        for item, pass1, res, meta in zip(filtered_data, detail_pass_at_1.values(), results.values(), final_metadata):
            item['Metrics'] = {'pass@1': pass1}
            item['Results'] = res
            item['Final_metadata'] = meta

        # Initialize per-difficulty metrics
        difficulty_metrics = defaultdict(list)
        for idx, difficulty in enumerate(difficulties):
            pass1 = detail_pass_at_1[idx]
            difficulty_metrics[difficulty].append(pass1)

        # Compute overall pass@1
        overall_metrics = {
            'pass@1': pass_at_1,  # / num_valid_answer * len(input_list),
            'num_valid_answer': f'{num_valid_answer} of {len(input_list)}',
            'query_latency': f'{(total_time / len(input_list) * 1000):.0f} ms',
        }

        # Compute per-difficulty pass@1
        per_difficulty_metrics = {}
        for difficulty, passes in difficulty_metrics.items():
            avg_pass = np.mean(passes) if len(passes) > 0 else 0.0
            num_valid_answer = per_difficulty_count[difficulty]
            per_difficulty_metrics[difficulty] = {
                'pass@1': avg_pass,
                'num_valid_answer': f'{num_valid_answer} of {len(passes)}'
            }

        # Save the metrics
        final_metrics = {
            'overall': overall_metrics,
            'per_domain': per_difficulty_metrics
        }

    else:
        # Existing evaluation for other datasets
        avg_em, avg_acc, avg_f1, avg_math = [], [], [], []
        num_valid_answer = 0

        # If the dataset is GPQA, track metrics per domain
        domain_metrics = {}

        for item, input_prompt, result in zip(filtered_data, input_list, output_list):
            # Accept either a raw string or a vLLM-style result object.
            if type(result) == str:
                item['Output'] = result
            else:
                item['Output'] = result.outputs[0].text
            # Pick the gold-answer field and extraction mode per dataset.
            if dataset_name in ['gpqa', 'medmcqa']:
                labeled_answer = item["Correct Choice"]
                # labeled_choice_answer = item["Correct Answer"]
                mode = 'choose'
            elif dataset_name in ['math500', 'aime', 'amc']:
                labeled_answer = item["answer"]
                mode = 'gen'
            elif dataset_name in ['simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
                labeled_answer = item["answer"]
                mode = 'qa'
            elif dataset_name in ['pubhealth']:
                labeled_answer = item["answer"]
                mode = 'choose'
            else:
                raise ValueError(f"Unknown dataset_name: {dataset_name}")

            metric, pred_answer = evaluate_predictions(output=item['Output'], labeled_answer=labeled_answer, mode=mode)
            item['Pred_Answer'] = pred_answer
            item['Metrics'] = metric
            item['Question'] = input_prompt

            # Determine the validity of the predicted answer
            # (GPQA multiple-choice answers longer than one character count
            # as invalid.)
            my_method_valid = (pred_answer != '' and not (mode == 'choose' and dataset_name == 'gpqa' and len(pred_answer) > 1))

            avg_em.append(metric['em'])
            avg_acc.append(metric['acc'])
            avg_f1.append(metric['f1'])
            avg_math.append(metric['math_equal'])

            if my_method_valid:
                num_valid_answer += 1

            # If the dataset is GPQA, attempt to track metrics per domain
            if dataset_name == 'gpqa':
                domain = item.get("High-level domain", "Unknown")
                if domain not in domain_metrics:
                    domain_metrics[domain] = {'em': [], 'acc': [], 'f1': [], 'math_equal': [], 'num_valid_answer': 0, 'total_num': 0}
                domain_metrics[domain]['total_num'] += 1
                domain_metrics[domain]['em'].append(metric['em'])
                domain_metrics[domain]['acc'].append(metric['acc'])
                domain_metrics[domain]['f1'].append(metric['f1'])
                domain_metrics[domain]['math_equal'].append(metric['math_equal'])
                if my_method_valid:
                    domain_metrics[domain]['num_valid_answer'] += 1

        # NOTE(review): these two file names are dead — they are recomputed
        # unconditionally after the if/else below and overwritten.
        t = time.localtime()
        result_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.json'
        metrics_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.metrics.json'

        # Compute overall metrics
        overall_results = {
            'em': np.mean(avg_em) if len(avg_em) > 0 else 0.0,
            'acc': np.mean(avg_acc) if len(avg_acc) > 0 else 0.0,
            'f1': np.mean(avg_f1) if len(avg_f1) > 0 else 0.0,
            # NOTE(review): guards on len(avg_em), not len(avg_math) — the
            # two lists always grow together, so this is harmless.
            'math_equal': np.mean(avg_math) if len(avg_em) > 0 else 0.0,
            'num_valid_answer': f'{num_valid_answer} of {len(input_list)}',
            'query_latency': f'{(total_time / len(input_list) * 1000):.0f} ms',
        }

        # If the dataset is GPQA, output average metrics per domain
        domain_avg_metrics = {}
        if dataset_name == 'gpqa':
            for dm, m in domain_metrics.items():
                domain_avg_metrics[dm] = {
                    'em': np.mean(m['em']) if len(m['em']) > 0 else 0,
                    'acc': np.mean(m['acc']) if len(m['acc']) > 0 else 0,
                    'f1': np.mean(m['f1']) if len(m['f1']) > 0 else 0,
                    'math_equal': np.mean(m['math_equal']) if len(m['math_equal']) > 0 else 0,
                    'num_valid_answer': f'{m["num_valid_answer"]} of {m["total_num"]}'
                }

        # Save overall and per-domain metrics
        final_metrics = {'overall': overall_results}
        if dataset_name == 'gpqa':
            final_metrics['per_domain'] = domain_avg_metrics

    # File names: split + month.day,hour:minute timestamp.
    t = time.localtime()
    result_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.json'
    metrics_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.metrics.json'
    if apply_backoff:
        # NOTE(review): output_dir is reused as the result file name here and
        # then joined with itself below — verify this path is intended.
        result_json_name = output_dir
        metrics_json_name = output_dir.replace('.json', '.metrics.backoff.json')

    # Save prediction results and metrics
    with open(os.path.join(output_dir, result_json_name), mode='w', encoding='utf-8') as json_file:
        json.dump(filtered_data, json_file, indent=4, ensure_ascii=False)

    with open(os.path.join(output_dir, metrics_json_name), mode='w', encoding='utf-8') as json_file:
        json.dump(final_metrics, json_file, indent=4, ensure_ascii=False)
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
if __name__ == "__main__":
    # CLI entry point: re-evaluate a saved output JSON, optionally backing
    # off to a "direct generation" baseline run when the main method's
    # answer is invalid.
    import argparse

    # Parse command-line arguments for flexibility
    parser = argparse.ArgumentParser(description="Evaluate model outputs with optional backoff.")
    parser.add_argument('--output_path', type=str, required=True, help='Path to the model output JSON file.')
    parser.add_argument('--output_metrics_path', type=str, help='Path to save the evaluation metrics.')
    parser.add_argument('--apply_backoff', action='store_true', help='Enable backoff to normal outputs if main output is invalid.')
    args = parser.parse_args()

    output_path = args.output_path
    if args.output_metrics_path:
        output_metrics_path = args.output_metrics_path
    else:
        output_metrics_path = output_path.replace('.json', '.metrics.json')

    # Determine dataset name based on the output path
    # NOTE: To apply back off strategy for retrieval-augmented reasoning methods, please replace normal_output_path with your actual path for results with run_direct_gen.
    # (The hard-coded timestamped paths below are machine-specific defaults.)
    if 'gpqa' in output_path:
        dataset_name = 'gpqa'
        normal_output_path = './outputs/gpqa.qwq.direct/diamond.12.13,18:23.json'
        if 'extended' in output_path:
            normal_output_path = './outputs/gpqa.qwq.direct/extended.12.28,15:44.json'
        if 'qwq' not in output_path:
            normal_output_path = './outputs/runs.baselines/gpqa.qwen2.5-32b-instruct.direct/diamond.12.14,20:34.json'
    elif 'math500' in output_path:
        dataset_name = 'math500'
        normal_output_path = './outputs/math500.qwq.direct/test.12.13,18:26.json'
        if 'qwq' not in output_path:
            normal_output_path = './outputs/runs.baselines/math500.qwen2.5-32b-instruct.direct/test.12.15,10:43.json'
    elif 'aime' in output_path:
        dataset_name = 'aime'
        normal_output_path = './outputs/aime.qwq.direct/2024.12.13,19:36.json'
        if 'qwq' not in output_path:
            normal_output_path = './outputs/runs.baselines/aime.qwen2.5-32b-instruct.direct/test.12.14,20:28.json'
    elif 'amc' in output_path:
        dataset_name = 'amc'
        normal_output_path = './outputs/amc.qwq.direct/test.12.14,14:31.json'
        if 'qwq' not in output_path:
            normal_output_path = './outputs/runs.baselines/amc.qwen2.5-32b-instruct.direct/test.12.14,20:26.json'
    elif 'livecode' in output_path:
        dataset_name = 'livecode'
        normal_output_path = './outputs/livecode.qwq.direct/test.12.13,21:24.json'
        if 'qwq' not in output_path:
            normal_output_path = './outputs/runs.baselines/livecode.qwen2.5-32b-instruct.direct/test.12.14,20:32.json'
    elif 'nq' in output_path:
        dataset_name = 'nq'
        normal_output_path = './outputs/runs.qa/nq.qwq.direct/test.12.15,14:50.json'
        if 'qwq' not in output_path:
            # Empty path: backoff for non-qwq QA runs is not configured.
            normal_output_path = ''
    elif 'triviaqa' in output_path:
        dataset_name = 'triviaqa'
        normal_output_path = './outputs/runs.qa/triviaqa.qwq.direct/test.12.15,15:35.json'
        if 'qwq' not in output_path:
            normal_output_path = ''
    elif 'hotpotqa' in output_path:
        dataset_name = 'hotpotqa'
        normal_output_path = './outputs/runs.qa/hotpotqa.qwq.direct/test.12.15,14:52.json'
        if 'qwq' not in output_path:
            normal_output_path = ''
    elif 'musique' in output_path:
        dataset_name = 'musique'
        normal_output_path = './outputs/runs.qa/musique.qwq.direct/test.12.27,16:44.json'
        if 'qwq' not in output_path:
            normal_output_path = ''
    elif 'bamboogle' in output_path:
        dataset_name = 'bamboogle'
        normal_output_path = './outputs/runs.qa/bamboogle.qwq.direct/test.12.28,9:51.json'
        if 'qwq' not in output_path:
            normal_output_path = ''
    elif '2wiki' in output_path:
        dataset_name = '2wiki'
        normal_output_path = './outputs/runs.qa/2wiki.qwq.direct/test.12.15,15:32.json'
        if 'qwq' not in output_path:
            normal_output_path = ''
    elif 'medmcqa' in output_path:
        dataset_name = 'medmcqa'
        normal_output_path = './outputs/runs.qa/medmcqa.qwq.direct/test.12.15,16:57.json'
        if 'qwq' not in output_path:
            normal_output_path = ''
    elif 'pubhealth' in output_path:
        dataset_name = 'pubhealth'
        normal_output_path = './outputs/runs.qa/pubhealth.qwq.direct/test.12.15,20:32.json'
        if 'qwq' not in output_path:
            normal_output_path = ''

    # Load main output data
    with open(output_path, mode='r', encoding='utf-8') as file:
        data = json.load(file)

    # Load main metrics data
    with open(output_metrics_path, mode='r', encoding='utf-8') as file:
        metrics = json.load(file)

    # Extract existing metrics (support both nested and flat metrics files).
    if 'overall' in metrics:
        query_latency = metrics['overall']['query_latency']
        original_num_valid_answer = metrics['overall']['num_valid_answer']
    else:
        query_latency = metrics.get('query_latency', 'N/A')
        original_num_valid_answer = metrics.get('num_valid_answer', 'N/A')

    # Load normal output data if backoff is enabled
    normal_data = None
    if args.apply_backoff:
        if not os.path.exists(normal_output_path):
            raise FileNotFoundError(f"Normal output file not found at: {normal_output_path}")
        with open(normal_output_path, mode='r', encoding='utf-8') as file:
            normal_data = json.load(file)

    if dataset_name != 'livecode':
        # Existing evaluation for non-livecode datasets
        avg_em, avg_acc, avg_f1, avg_math = [], [], [], []
        num_valid_answer = 0

        # Initialize per-domain metrics
        domain_metrics = {}

        for i, item in enumerate(data):
            # Pick the gold-answer field, per-item domain, and mode per dataset.
            if dataset_name in ['gpqa', 'medmcqa']:
                labeled_answer = item["Correct Choice"]
                domain = item.get("High-level domain", "Unknown")
                mode = 'choose'
            elif dataset_name == 'math500':
                labeled_answer = item["answer"]
                domain = item.get("level", "Unknown")
                mode = 'gen'
            elif dataset_name in ['aime', 'amc']:
                labeled_answer = item["answer"]
                mode = 'gen'
                domain = 'Unknown'
            elif dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
                labeled_answer = item["answer"]
                mode = 'qa'
                domain = 'Unknown'
            elif dataset_name in ['pubhealth']:
                labeled_answer = item["answer"]
                mode = 'choose'
                domain = 'Unknown'
            else:
                raise ValueError(f"Unsupported dataset: {dataset_name}")

            output = item['Output']

            metric, pred_answer = evaluate_predictions(
                output=output,
                labeled_answer=labeled_answer,
                mode=mode,
            )

            # Determine if the main method's answer is valid
            my_method_valid = (pred_answer != '' and not (mode == 'choose' and dataset_name == 'gpqa' and len(pred_answer) > 1))

            # If invalid and backoff is enabled, use normal method's output
            if args.apply_backoff and not my_method_valid and normal_data is not None:
                normal_item = normal_data[i]
                if dataset_name in ['gpqa', 'medmcqa']:
                    normal_labeled_answer = normal_item["Correct Choice"]
                    normal_mode = 'choose'
                elif dataset_name == 'math500':
                    normal_labeled_answer = normal_item["answer"]
                    normal_mode = 'gen'
                elif dataset_name in ['aime', 'amc']:
                    normal_labeled_answer = normal_item["answer"]
                    normal_mode = 'gen'
                elif dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
                    normal_labeled_answer = normal_item["answer"]
                    normal_mode = 'qa'
                elif dataset_name in ['pubhealth']:
                    normal_labeled_answer = normal_item["answer"]
                    normal_mode = 'choose'
                else:
                    raise ValueError(f"Unsupported dataset for backoff: {dataset_name}")

                normal_output = normal_item['Output']

                normal_metric, normal_pred_answer = evaluate_predictions(
                    output=normal_output,
                    labeled_answer=normal_labeled_answer,
                    mode=normal_mode,
                )
                normal_valid = (normal_pred_answer != '' and not (normal_mode == 'choose' and dataset_name == 'gpqa' and len(normal_pred_answer) > 1))

                # Use normal method's result if valid
                if normal_valid:
                    metric = normal_metric
                    pred_answer = normal_pred_answer
                    my_method_valid = True

            # Track metrics per domain
            if domain not in domain_metrics:
                domain_metrics[domain] = {'em': [], 'acc': [], 'f1': [], 'math_equal': [], 'num_valid_answer': 0, 'total_num': 0}
            domain_metrics[domain]['total_num'] += 1

            avg_em.append(metric['em'])
            avg_acc.append(metric['acc'])
            avg_f1.append(metric['f1'])
            avg_math.append(metric['math_equal'])
            domain_metrics[domain]['em'].append(metric['em'])
            domain_metrics[domain]['acc'].append(metric['acc'])
            domain_metrics[domain]['f1'].append(metric['f1'])
            domain_metrics[domain]['math_equal'].append(metric['math_equal'])

            if my_method_valid:
                num_valid_answer += 1
                domain_metrics[domain]['num_valid_answer'] += 1

        # Compute overall metrics
        overall_metrics = {
            'em': np.mean(avg_em) if len(avg_em) > 0 else 0,
            'acc': np.mean(avg_acc) if len(avg_acc) > 0 else 0,
            'f1': np.mean(avg_f1) if len(avg_f1) > 0 else 0,
            'math_equal': np.mean(avg_math) if len(avg_math) > 0 else 0,
            'num_valid_answer': f'{num_valid_answer} of {len(data)}',
            'query_latency': query_latency,
        }
        if args.apply_backoff:
            overall_metrics['original_num_valid_answer'] = original_num_valid_answer

        # Compute per-domain metrics
        domain_avg_metrics = {}
        for dm, m in domain_metrics.items():
            domain_avg_metrics[dm] = {
                'em': np.mean(m['em']) if len(m['em']) > 0 else 0,
                'acc': np.mean(m['acc']) if len(m['acc']) > 0 else 0,
                'f1': np.mean(m['f1']) if len(m['f1']) > 0 else 0,
                'math_equal': np.mean(m['math_equal']) if len(m['math_equal']) > 0 else 0,
                'num_valid_answer': f'{m["num_valid_answer"]} of {m["total_num"]}',
            }

        # Prepare final metrics
        final_metrics = {'overall': overall_metrics}
        if dataset_name == 'gpqa':
            final_metrics['per_domain'] = domain_avg_metrics

    else:
        # Evaluation and backoff for livecode dataset
        split = 'test'  # Modify as needed or extract from output_path

        if args.apply_backoff and normal_data is not None:
            # Apply backoff by replacing invalid outputs with normal outputs
            for i, item in enumerate(data):
                # Extract Pred_Answer from main output
                pred_answer = item['Pred_Answer']

                # Check if Pred_Answer is invalid
                if pred_answer == '':
                    # Replace Output with normal output
                    item['Output'] = normal_data[i]['Output']

        # Prepare input_list and output_list for run_evaluation
        input_list = [item['Question'] for item in data]
        output_list = [item['Output'] for item in data]

        # Estimate total_time (if available). Here, set to 0 as a placeholder.
        total_time = 0  # Modify if timing information is available

        # Run evaluation
        run_evaluation(
            filtered_data=data,
            input_list=input_list,
            output_list=output_list,
            dataset_name=dataset_name,
            output_dir=output_path,
            total_time=total_time,
            split=split,
            apply_backoff=True,
        )
        # run_evaluation handles saving the metrics for livecode

    # Save metrics for non-livecode datasets
    # NOTE(review): for livecode WITHOUT --apply_backoff this condition is
    # still True and `final_metrics` is undefined — would raise NameError.
    if dataset_name != 'livecode' or not args.apply_backoff:
        # If dataset is livecode and backoff was applied, metrics are already saved by run_evaluation
        if args.apply_backoff:
            output_metrics_path = output_metrics_path.replace('.json', '.backoff.json')
        with open(output_metrics_path, mode='w', encoding='utf-8') as json_file:
            json.dump(final_metrics, json_file, indent=4, ensure_ascii=False)

    print(f"Evaluation completed. Metrics saved to {output_metrics_path}")
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
if __name__ == "__main__":
    # NOTE(review): this is a SECOND __main__ guard in the same file — the CLI
    # entry point above also uses one, so when the script is executed this
    # smoke test runs after the CLI block finishes. It looks like leftover
    # manual testing of extract_answer's 'infogen' mode; consider removing it
    # or folding it into a proper test.
    raw_output="**Final Information**\n\nScott Derrickson is an American filmmaker. This information is directly stated in multiple web pages, including \"Scott Derrickson - Wikipedia\" and \"Scott Derrickson - Biography - IMDb\". Given the context of his education and career, it is clear that Scott Derrickson is American."
    extracted_info = extract_answer(raw_output, mode='infogen')
    print(extracted_info)
|
deep_search/search_o1/scripts/llm_as_judge.py
ADDED
|
@@ -0,0 +1,496 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from multiprocessing import Pool, cpu_count
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
import time
|
| 5 |
+
|
| 6 |
+
from openai import OpenAI
|
| 7 |
+
import torch
|
| 8 |
+
# from .call_vllm import extract_answer
|
| 9 |
+
import argparse
|
| 10 |
+
import json
|
| 11 |
+
from typing import List, Dict, Tuple
|
| 12 |
+
from tqdm import tqdm
|
| 13 |
+
from transformers import AutoTokenizer
|
| 14 |
+
from vllm import LLM, SamplingParams
|
| 15 |
+
|
| 16 |
+
# SECURITY NOTE(review): API keys were committed to source control here and
# unconditionally overwrote any caller-provided environment.  The committed
# keys must be treated as leaked and rotated.  `setdefault` preserves the old
# fallback behaviour while letting an externally supplied key take precedence.
os.environ.setdefault("OPENAI_API_KEY", "sk-UqtaICKlpmRBk8iPb3SptBVC4IzlIip4SOa5BqrQK4TkwKYt")
os.environ.setdefault("OPENAI_API_BASE", "https://aigc.x-see.cn/v1/")

# Shared OpenAI-compatible HTTP client used by `process_prompt`.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    base_url=os.environ.get("OPENAI_API_BASE")
)
|
| 24 |
+
|
| 25 |
+
def process_prompt(kwargs):
    """Send one chat prompt to the OpenAI-compatible endpoint and return the reply.

    `kwargs` must provide 'model_doc_reason_path' (model name) and 'prompt'
    (user message).  Retries indefinitely on any API error, sleeping 0.5 s
    between attempts; returns the first successful completion text.
    """
    model_name = kwargs['model_doc_reason_path']
    user_prompt = kwargs['prompt']
    while True:
        try:
            response = client.chat.completions.create(
                model=model_name,
                messages=[{'role': 'user', 'content': user_prompt}],
                max_tokens=1024,
                temperature=0.7,
                # top_p=0.8,
            )
        except Exception as err:
            # NOTE(review): unbounded retry assumes errors are transient.
            print(f"Error: {err}")
            time.sleep(0.5)
        else:
            return response.choices[0].message.content
|
| 42 |
+
def extract_answer(output, mode='qa'):
    """Pull the final answer out of a model's raw generation.

    Modes:
      - 'codegen': return the last ```python fenced code block.
      - 'infogen': return the text following the last '**Final Information**'
        or '**Modified Reasoning Steps**' marker (whole output if neither
        marker appears).
      - anything else ('qa', 'choose', 'gen', ...): return the content of the
        last \\boxed{...}; for 'qa'/'choose' additionally unwrap a
        \\text{...} wrapper and strip surrounding parentheses.
    """
    extracted_text = ''
    if mode == 'codegen':
        # The last ```python ... ``` fenced block wins.
        code_blocks = re.findall(r'```python\s*(.*?)\s*```', output, re.DOTALL | re.IGNORECASE)
        if code_blocks:
            extracted_text = code_blocks[-1].strip()
    elif mode == 'infogen':
        # Extract the reasoning the model produced from web-page content.
        info_marker = "**Final Information**"
        step_marker = "**Modified Reasoning Steps**"
        if info_marker in output:
            extracted_text = output.split(info_marker)[-1].replace("\n", "").strip("```").strip()
        elif step_marker in output:
            extracted_text = output.split(step_marker)[-1].strip("```").strip()
        else:
            # No marker found: fall back to the raw output.
            extracted_text = output
    else:
        # 'gen' / 'choose' / 'qa': take the last \boxed{...} occurrence.
        boxed_matches = re.findall(r'\\boxed\{(.*)\}', output)
        if boxed_matches:
            extracted_text = boxed_matches[-1]
            if mode in ['choose', 'qa']:
                # Unwrap \text{...} and drop surrounding parentheses.
                inner_matches = re.findall(r'\\text\{(.*)\}', extracted_text)
                if inner_matches:
                    extracted_text = inner_matches[-1]
                extracted_text = extracted_text.strip("()")
    return extracted_text
|
| 77 |
+
|
| 78 |
+
# English judge prompt (adapted from OpenAI simple-evals): labels a predicted
# answer CORRECT / INCORRECT / NOT_ATTEMPTED against a gold target and must
# reply with \boxed{A|B|C}.  Placeholders: {question}, {target},
# {predicted_answer}; literal braces are escaped as {{ }} for str.format.
# Fix: added the missing opening quote in `and "115k"` (was `and 115k"`).
GRADER_TEMPLATE = """
Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
First, I will give examples of each grade, and then you will grade a new example.


The following are examples of CORRECT predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia Obama and Sasha Obama
Predicted answer 1: sasha and malia obama
Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
```
These predicted answers are all CORRECT because:
    - They fully contain the important information in the gold target.
    - They do not contain any information that contradicts the gold target.
    - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
    - Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.


The following are examples of INCORRECT predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia and Sasha
Predicted answer 1: Malia.
Predicted answer 2: Malia, Sasha, and Susan.
Predicted answer 3: Barack Obama does not have any children.
Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
```
These predicted answers are all INCORRECT because:
    - A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.


The following are examples of NOT_ATTEMPTED predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia and Sasha
Predicted answer 1: I don't know.
Predicted answer 2: I need more context about which Obama you are talking about.
Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
```
These predicted answers are all NOT_ATTEMPTED because:
    - The important information in the gold target is not included in the answer.
    - No statements in the answer contradict the gold target.


Also note the following things:
- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
    - Predicted answers "120k", "124k", and "115k" are all CORRECT.
    - Predicted answers "100k" and "113k" are INCORRECT.
    - Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
    - For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
- Do not punish predicted answers if they omit information that would be clearly inferred from the question.
    - For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
    - Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
    - For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
    - For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
- Do not punish for typos in people's name if it's clearly the same name.
    - For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".


Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
```
Question: {question}
Gold target: {target}
Predicted answer: {predicted_answer}
```

Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
C: NOT_ATTEMPTED

Just return the letters "A", "B", or "C". Provide your final answer in the format \\boxed{{LETTER}}.\n\n
""".strip()
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# Chinese-language counterpart of GRADER_TEMPLATE (same A/B/C grading
# contract).  Placeholders: {question}, {target}, {predicted_answer}.
# Fix: repaired a mojibake character in "重要信息" (was "重要信��").
CHINESE_GRADER_TEMPLATE = """
请根据给定问题、标准答案和模型预测的答案来评估模型的回答是否正确。您的任务是将结果评定为:【正确】、【错误】或【未尝试】。

首先,我们将列出每个评定类别的示例,然后请您对新问题的预测答案进行评定。
以下是【正确】的答复示例:
```
问题:贝拉克·奥巴马的孩子叫什么名字?
标准答案:玛丽亚·奥巴马和萨莎·奥巴马
模型预测1:Malia Obama and Sasha Obama
模型预测2:玛丽亚和萨沙
模型预测3:大多数人会说是玛丽亚和萨莎,但我不确定,需要再确认
模型预测4:巴拉克·奥巴马有两个女儿,她们分别是玛丽亚·安和娜塔莎·玛丽安,但通常称作玛丽亚·奥巴马和萨莎·奥巴马。玛丽亚出生于1998年7月4日,萨莎出生于2001年6月10日。
```
这些答复均为【正确】,因为:
    - 完整地包含了标准答案中的重要信息。
    - 不包含任何与标准答案矛盾的信息。
    - 只关注语义内容,中英文,大小写、标点、语法和顺序不重要。
    - 答复中出现模糊语句或猜测是可以接受的,前提是包含了标准答案且不含有不正确信息或矛盾。

以下是【错误】的答复示例:
```
问题:巴拉克·奥巴马的孩子叫什么名字?
标准答案:玛丽亚·奥巴马和萨莎·奥巴马
模型预测1:玛丽亚
模型预测2:玛丽亚、萨莎和苏珊
模型预测3:巴拉克·奥巴马没有孩子
模型预测4:我认为是玛丽亚和萨莎。或者是玛丽亚和杰基。或者是乔伊和玛丽亚。
模型预测5:虽然我不知道他们的确切名字,但能说出巴拉克·奥巴马有三个孩子。
模型预测6:你可能是想说贝茜和奥利维亚。不过您应通过最新的参考资料确认详细信息。那是正确的答案吗?
```
这些答复均为【错误】,因为:
    - 答复中包含与标准答案矛盾的事实陈述。即使在陈述中略带保留(例如:“可能是”,“虽然我不确定,但我认为”),也视为错误。

以下是【未尝试】的答复示例:
```
问题:巴拉克·奥巴马的孩子叫什么名字?
标准答案:玛丽亚·奥巴马和萨莎·奥巴马
模型预测1:我不知道。
模型预测2:我需要更多关于您所指奥巴马的上下文。
模型预测3:不查阅网络我无法回答这个问题,不过我知道巴拉克·奥巴马有两个孩子。
模型预测4:巴拉克·奥巴马有两个孩子。我知道其中一个叫玛丽亚,但我不确定另一个的名字。
```
这些答复均为【未尝试】,因为:
    - 没有包含标准答案中的重要信息。
    - 回复中没有与标准答案矛盾的陈述。

另外注意以下几点:
- 对于标准答案为数字的问题,预测答案应和标准答案一致。例如,考虑问题“金山铁路黄浦江特大桥的全长是多少米?”,标准答案为“3518.17”:
    - 预测答案“3518”、“3518.1”、“3518.17”均为【正确】。
    - 预测答案“3520”和“3600”均为【错误】。
    - 预测答案“大约3500米”和“超过3000米”被视为【未尝试】,因为它们既不确认也不与标准答案矛盾。
- 如果标准答案包含比问题更多的信息,预测答案只需包含问题中提到的信息。
    - 例如,考虑问题“菱镁矿的主要化学成分是什么?”标准答案为“碳酸镁(MgCO3)”。“碳酸镁”或“MgCO3”均视为【正确】答案。
- 如果从问题中明显可以推断出预测答案省略的信息,那么算作正确。
    - 例如,问题“巴鲁米尼的努拉吉遗迹在1997年被联合国教科文组织列为世界文化遗产,那么这遗址在哪个地区?”标准答案为“意大利撒丁岛”,预测答案“撒丁岛”被视为【正确】。
- 如果能明显看出名字翻译版本不同但是是同一个人也认为正确。
    - 例如,如果标准答案是“Robinson”,那么回答鲁滨逊或者鲁滨孙均正确。

下面是一个新的问题示例。请只回复A、B、C之一,不要道歉或纠正自己的错误,只需要评估该回答。
```
问题: {question}
正确答案: {target}
预测答案: {predicted_answer}
```

将此新问题的预测答案评定为以下之一:
A:【正确】
B:【错误】
C:【未尝试】

请返回字母"A"、"B"或"C"。 请按照以下格式生成你的最终评定等级:\\boxed{{字母}}.\n\n
""".strip()
|
| 232 |
+
|
| 233 |
+
GPT_PROMPT = """You will receive a question along with a reference answer and a predicted answer. Your task is to evaluate the accuracy of the predicted answer and provide a concise explanation.
|
| 234 |
+
|
| 235 |
+
Compare the predicted answer to the reference answer to determine its correctness.
|
| 236 |
+
|
| 237 |
+
**Guidelines**
|
| 238 |
+
- The criteria for evaluating the predicted answer should not be overly strict. If the predicted answer's meaning aligns closely with that of the reference answer, it can be deemed correct.
|
| 239 |
+
- For each question, provide a brief explanation of your reasoning, followed by "Correct" or "Incorrect." Include your final assessment within <assessment> tags.
|
| 240 |
+
|
| 241 |
+
**Output Format**
|
| 242 |
+
[Explanation]: Provide a brief explanation supporting your judgment.
|
| 243 |
+
[Assessment]: Provide your assessment **within <assessment> tags**.
|
| 244 |
+
|
| 245 |
+
Here is the question:
|
| 246 |
+
{question}
|
| 247 |
+
|
| 248 |
+
Here is the reference answer:
|
| 249 |
+
{target}
|
| 250 |
+
|
| 251 |
+
Here is the predicted answer:
|
| 252 |
+
{predicted_answer}
|
| 253 |
+
""".strip()
|
| 254 |
+
|
| 255 |
+
def load_model_and_tokenizer(model_path):
    """Load a vLLM model and tokenizer from `model_path`.

    If `model_path` names an OpenAI GPT model (contains 'gpt'), no local
    weights are loaded: returns (model_path, None) so callers route requests
    through the HTTP client.  Otherwise returns (vllm.LLM, tokenizer).

    Fix: the original docstring was a dead string expression placed *after*
    the early-return branch; it is now a proper function docstring.
    """
    if 'gpt' in model_path:
        # Remote OpenAI-compatible model: nothing to load locally.
        return model_path, None

    print(f"Loading tokenizer from {model_path}...")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        # Decoder-only models often ship without a pad token.
        tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = 'left'
    print("Tokenizer loaded successfully.")

    print(f"Loading model from {model_path}...")
    llm = LLM(
        model=model_path,
        tensor_parallel_size=torch.cuda.device_count(),
        gpu_memory_utilization=0.95,
    )
    print("Model loaded successfully.")
    return llm, tokenizer
|
| 276 |
+
|
| 277 |
+
def llm_as_judge(llm, tokenizer, questions: List[str], targets: List[str], predicted_answers: List[str], language: str = "en") -> List[str]:
    """Batch-grade predicted answers using an LLM as the judge.

    Two execution paths, selected by `tokenizer`:
      - tokenizer is None: `llm` is an OpenAI model *name*; prompts go through
        the module-level HTTP client via a multiprocessing pool (GPT_PROMPT),
        and the raw judge text is returned unparsed.
      - otherwise: `llm` is a local vLLM engine; prompts are chat-templated,
        generated in one greedy batch, and the \\boxed{...} letter is
        extracted from each output.

    Returns one grade string per (question, target, prediction) triple.
    """
    # Pick the English or Chinese A/B/C grader; the free-form GPT prompt
    # overrides both when routing through the OpenAI API.
    template = GRADER_TEMPLATE if language == "en" else CHINESE_GRADER_TEMPLATE
    template = GPT_PROMPT if tokenizer is None else template
    prompts = [
        template.format(question=q, target=t, predicted_answer=p)
        for q, t, p in zip(questions, targets, predicted_answers)
    ]
    if tokenizer is None:
        num_processes = min(min(cpu_count(), len(prompts)), 24)  # Use CPU cores or task count, whichever is smaller
        start_time = time.time()  # NOTE(review): assigned but unused on this path
        inputs = [
            {
                'model_doc_reason_path': llm,
                'prompt': prompt,
            }
            for prompt in prompts]

        # Fan the API calls out across processes; imap preserves input order.
        with Pool(processes=num_processes) as pool:
            generated_texts = list(
                tqdm(
                    pool.imap(process_prompt, inputs),
                    total=len(prompts),
                    desc="Generate webpage analyses",
                )
            )
    else:

        # Wrap each prompt in the chat template the local model expects.
        prompts = [
            tokenizer.apply_chat_template([{"role": "user", "content": p}], tokenize=False, add_generation_prompt=True)
            for p in prompts
        ]

        # Greedy decoding for deterministic grading.
        sampling_params = SamplingParams(
            max_tokens=10000,
            temperature=0.0,
            top_p=1.0,
        )

        # One batched vLLM generation call for all prompts.
        start_time = time.time()
        outputs = llm.generate(prompts, sampling_params=sampling_params)
        end_time = time.time()
        elapsed_time = end_time - start_time
        print(f"Generated {len(outputs)} responses in {elapsed_time:.2f} seconds.")

        # Pull the \boxed{...} grade letter out of each generation.
        generated_texts = [extract_answer(out.outputs[0].text) for out in outputs]
    return generated_texts
|
| 330 |
+
|
| 331 |
+
def evaluate_questions(
    llm,
    tokenizer,
    data: List[Dict],
    language: str = "en"
) -> Tuple[List[Dict], Dict]:
    """Grade every sample with the LLM judge and compute aggregate metrics.

    Each sample is expected to look like
    {"item": {"Question": ..., "answer": str | list[str], "source": ...},
     "prompt": <raw model output>}  -- TODO confirm against callers.

    Returns (per-question results, {"overall": metrics, "per_source": ...}).

    Fix: the per-source F1 was previously assigned to metrics["f1"] inside
    the source loop, clobbering the overall F1 with the last source's value;
    it is now stored as source_metrics[src]["f1"].
    """
    results = []
    metrics = {
        "is_correct": 0,
        "is_incorrect": 0,
        "is_not_attempted": 0,
    }
    error_cnt = 0  # judge replies that were not A/B/C; counted as incorrect
    source_metrics = {}

    # Extract questions; targets and predictions are gathered below.
    questions = [sample["item"]["Question"] for sample in data]

    # Normalize plain-string answers to single-element lists.
    answer_type_str = 0
    for sample in data:
        if isinstance(sample["item"]["answer"], str):
            sample["item"]["answer"] = [sample["item"]["answer"]]
            answer_type_str += 1

    print(f"Answer is str type: {answer_type_str}")

    targets = ['\t'.join(sample["item"]["answer"]) for sample in data]
    predicted_answers = [extract_answer(sample["prompt"]) for sample in data]

    # Batch evaluation by the judge.
    grades = llm_as_judge(llm, tokenizer, questions, targets, predicted_answers, language)

    # Tally grades, both overall and per data source.
    for sample, grade in zip(data, grades):
        question = sample["item"]["Question"]
        target = '\t'.join(sample["item"]["answer"])
        predicted_answer = extract_answer(sample["prompt"])

        source = sample["item"].get("source", "Unknown")
        if source not in source_metrics:
            source_metrics[source] = {
                "is_correct": 0,
                "is_incorrect": 0,
                "is_not_attempted": 0,
            }

        if tokenizer is None:
            # GPT judge returns free text; anything not flagged "incorrect"
            # is counted as correct (NOT_ATTEMPTED is not distinguished here).
            if "incorrect" in grade.lower():
                metrics["is_incorrect"] += 1
                source_metrics[source]["is_incorrect"] += 1
            else:
                metrics["is_correct"] += 1
                source_metrics[source]["is_correct"] += 1
        else:
            # Local judge returns the boxed letter A/B/C.
            if grade == "A":
                metrics["is_correct"] += 1
                source_metrics[source]["is_correct"] += 1
            elif grade == "B":
                metrics["is_incorrect"] += 1
                source_metrics[source]["is_incorrect"] += 1
            elif grade == "C":
                metrics["is_not_attempted"] += 1
                source_metrics[source]["is_not_attempted"] += 1
            else:
                # Malformed judge output: count as incorrect rather than crash.
                error_cnt += 1
                metrics["is_incorrect"] += 1
                source_metrics[source]["is_incorrect"] += 1

        results.append({
            "question": question,
            "targets": target,
            "predicted_answer": predicted_answer,
            "grade": grade,
        })

    # Overall metrics as fractions of all samples.
    total = len(data)
    for key in metrics:
        metrics[key] = metrics[key] / total
    metrics["is_given_attempted"] = metrics["is_correct"] + metrics["is_incorrect"]
    metrics["accuracy_given_attempted"] = (
        metrics["is_correct"] / metrics["is_given_attempted"]
        if metrics["is_given_attempted"] > 0 else 0
    )
    metrics["f1"] = (
        2 * metrics["accuracy_given_attempted"] * metrics["is_correct"]
        / (metrics["accuracy_given_attempted"] + metrics["is_correct"])
        if (metrics["accuracy_given_attempted"] + metrics["is_correct"]) > 0 else 0
    )

    # Per-source metrics, normalized by each source's sample count.
    for src in source_metrics:
        total_num = sum(source_metrics[src].values())
        print(f"{src}: {total_num}")
        for key in source_metrics[src]:
            source_metrics[src][key] = source_metrics[src][key] / total_num
        source_metrics[src]['total_num'] = total_num
        source_metrics[src]["is_given_attempted"] = source_metrics[src]["is_correct"] + source_metrics[src]["is_incorrect"]
        source_metrics[src]["accuracy_given_attempted"] = (
            source_metrics[src]["is_correct"] / source_metrics[src]["is_given_attempted"]
            if source_metrics[src]["is_given_attempted"] > 0 else 0
        )
        # BUG FIX: store the per-source F1 on the source entry instead of
        # overwriting the overall metrics["f1"].
        source_metrics[src]["f1"] = (
            2 * source_metrics[src]["accuracy_given_attempted"] * source_metrics[src]["is_correct"]
            / (source_metrics[src]["accuracy_given_attempted"] + source_metrics[src]["is_correct"])
            if (source_metrics[src]["accuracy_given_attempted"] + source_metrics[src]["is_correct"]) > 0 else 0
        )

    final_metrics = {'overall': metrics, 'per_source': source_metrics}

    return results, final_metrics
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def main():
    """Command-line entry point.

    Loads a judged-output JSON file, grades every sample with the configured
    judge model, and writes per-question results and aggregate metrics next
    to the input file.
    """
    parser = argparse.ArgumentParser(description="Evaluate model predictions using LLM as a judge.")
    parser.add_argument("--model_name", type=str, default='gpt-4o-mini', help="Name of the evalution model.")
    parser.add_argument("--model_path", type=str, default='gpt-4o-mini', help="Path to the evalution model.")
    parser.add_argument("--input_file", type=str, default='/share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_musique_syn/turn_13.json', help="Path to the input JSON file.")
    parser.add_argument("--language", type=str, default="en", choices=["en", "zh"], help="Language of the evaluation.")
    args = parser.parse_args()

    # Output files live next to the input file, tagged with the judge name.
    out_dir = os.path.dirname(args.input_file)
    results_path = os.path.join(out_dir, f"result_{args.model_name}.json")
    metrics_path = os.path.join(out_dir, f"metrics_{args.model_name}.json")

    # Load the judge (or route to the OpenAI API for gpt-* names).
    llm, tokenizer = load_model_and_tokenizer(args.model_path)

    # Read the samples to grade.
    with open(args.input_file, "r", encoding="utf-8") as f:
        data = json.load(f)

    # Grade and aggregate.
    results, metrics = evaluate_questions(llm, tokenizer, data, language=args.language)

    # Persist per-question results, then the aggregate metrics.
    with open(results_path, "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=4)
    with open(metrics_path, "w", encoding="utf-8") as f:
        json.dump(metrics, f, ensure_ascii=False, indent=4)

    print("Evaluation completed. Results saved to:", results_path)
    print("Metrics saved to:", metrics_path)

if __name__ == "__main__":
    main()
|
deep_search/search_o1/scripts/prompts.py
ADDED
|
@@ -0,0 +1,667 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
def get_gpqa_search_o1_instruction(MAX_SEARCH_LIMIT):
    """Return the GPQA (science QA) system prompt for the search agent.

    The prompt teaches the <|begin_search_query|>/<|end_search_query|>
    tool protocol, shows a one-search worked example (pp III neutrinos),
    and caps the number of search attempts at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "What is the energy range of pp III neutrinos?"\n',
        "Assistant thinking steps:\n",
        "- I might need to look up details about pp III neutrinos.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>pp III neutrino energy spectrum<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def get_math_search_o1_instruction(MAX_SEARCH_LIMIT):
    """Return the math-QA system prompt for the search agent.

    Identical protocol to the other search-o1 prompts, with a worked
    example about integrating e^(x^2); searches are capped at
    ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "How do you compute the integral of e^(x^2) dx?"\n',
        "Assistant thinking steps:\n",
        "- I might need to look up techniques for integrating e^(x^2).\n\n",
        "Assistant:\n",
        "<|begin_search_query|>methods to integrate e^(x^2)<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_math_search_o1_instruction_1(MAX_SEARCH_LIMIT):
    """Return the math-QA prompt variant that nudges proactive searching.

    Differs from ``get_math_search_o1_instruction`` only in the sentence
    that encourages a search whenever the model is uncertain; the search
    budget is ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"Whenever you encounter a topic, fact, or piece of information you are uncertain about or need further details on, please perform a search to gather more accurate, up-to-date, or specific information. You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "How do you compute the integral of e^(x^2) dx?"\n',
        "Assistant thinking steps:\n",
        "- I might need to look up techniques for integrating e^(x^2).\n\n",
        "Assistant:\n",
        "<|begin_search_query|>methods to integrate e^(x^2)<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_code_search_o1_instruction(MAX_SEARCH_LIMIT):
    """Return the coding-task system prompt for the search agent.

    Same tool protocol as the other search-o1 prompts; the worked
    example covers the minimum Steiner tree problem. Searches are
    capped at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "Find the minimum number of vertices in a Steiner tree that includes all specified vertices in a given tree."\n',
        "Assistant thinking steps:\n",
        "- I need to understand what a Steiner tree is and how to compute the minimum number of vertices required to include all specified vertices in a given tree.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>Minimum Steiner Tree problem in trees<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def get_webpage_to_reasonchain_instruction(prev_reasoning, search_query, document):
    """Build the reader prompt that distills searched pages into reasoning.

    Interpolates the prior chain of thought, the active search query,
    and the fetched page text into a fixed instruction template. The
    template asks for output beginning with ``**Final Information**``,
    or the literal fallback "No helpful information found." when the
    pages are unhelpful.
    """
    prompt = f"""**Task Instruction:**

You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.

**Guidelines:**

1. **Analyze the Searched Web Pages:**
- Carefully review the content of each searched web page.
- Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.

2. **Extract Relevant Information:**
- Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
- Ensure that the extracted information is accurate and relevant.

3. **Output Format:**
- **If the web pages provide helpful information for current search query:** Present the information beginning with `**Final Information**` as shown below.
**Final Information**

[Helpful information]

- **If the web pages do not provide any helpful information for current search query:** Output the following text.

**Final Information**

No helpful information found.

**Inputs:**
- **Previous Reasoning Steps:**
{prev_reasoning}

- **Current Search Query:**
{search_query}

- **Searched Web Pages:**
{document}

Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.
"""
    return prompt
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def get_webpage_to_reasonchain_instruction_1(prev_reasoning, search_query, document):
    """Build the reader prompt variant without a "no information" branch.

    Like ``get_webpage_to_reasonchain_instruction`` but the output-format
    section always requests helpful information after
    ``**Final Information**`` — there is no explicit fallback wording.
    """
    prompt = f"""**Task Instruction:**

You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.

**Guidelines:**

1. **Analyze the Searched Web Pages:**
- Carefully review the content of each searched web page.
- Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.

2. **Extract Relevant Information:**
- Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
- Ensure that the extracted information is accurate and relevant.

3. **Output Format:**
- Present the helpful information for current search query: beginning with `**Final Information**` as shown below.
**Final Information**

[Helpful information]

**Inputs:**
- **Previous Reasoning Steps:**
{prev_reasoning}

- **Current Search Query:**
{search_query}

- **Searched Web Pages:**
{document}

Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.
"""
    return prompt
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def get_singleqa_search_o1_instruction(MAX_SEARCH_LIMIT):  # the worked example performs a single search
    """Return the single-hop QA system prompt for the search agent.

    Worked example shows one search (first Nobel Prize in Physics);
    search attempts are capped at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "Who got the first Nobel Prize in Physics?"\n',
        "Assistant thinking steps:\n",
        "- I need to find out who was awarded the first Nobel Prize in Physics.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>first Nobel Prize in Physics winner<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
# When you encounter information that is not available in your knowledge base or need to update real-time data, you can search the Internet.
|
| 192 |
+
def get_singleqa_search_o1_instruction_1(MAX_SEARCH_LIMIT):  # the worked example performs a single search
    """Return the single-hop QA prompt variant that nudges proactive searching.

    Same as ``get_singleqa_search_o1_instruction`` except for the
    sentence encouraging a search whenever the model is uncertain;
    searches are capped at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"Whenever you encounter a topic, fact, or piece of information you are uncertain about or need further details on, please perform a search to gather more accurate, up-to-date, or specific information. You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "Who got the first Nobel Prize in Physics?"\n',
        "Assistant thinking steps:\n",
        "- I need to find out who was awarded the first Nobel Prize in Physics.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>first Nobel Prize in Physics winner<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
# When you encounter information that is not available in your knowledge base or need to update real-time data, you can search the Internet.
|
| 215 |
+
def get_singleqa_search_o1_instruction_2(MAX_SEARCH_LIMIT):  # the worked example performs a single search
    """Return the single-hop QA prompt variant for knowledge-gap searching.

    Same protocol as the other single-QA prompts; this variant tells the
    model to search when information is missing from its knowledge base
    or stale. Searches are capped at ``MAX_SEARCH_LIMIT``.
    """
    return (
        "You are a reasoning assistant with the ability to perform web searches to help "
        "you answer the user's question accurately. You have special tools:\n\n"
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n"
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n"
        # Fixed typo: the original said "search the Ethernet"; a web-search
        # tool reaches the Internet (Ethernet is a LAN technology).
        f"When you encounter information that is not available in your knowledge base or need to update real-time data, you can search the Internet. You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
        "Once you have all the information you need, continue your reasoning.\n\n"
        "Example:\n"
        "Question: \"Who got the first Nobel Prize in Physics?\"\n"
        "Assistant thinking steps:\n"
        "- I need to find out who was awarded the first Nobel Prize in Physics.\n\n"
        "Assistant:\n"
        "<|begin_search_query|>first Nobel Prize in Physics winner<|end_search_query|>\n\n"
        "(System returns processed information from relevant web pages)\n\n"
        "Assistant continues reasoning with the new information...\n\n"
        "Remember:\n"
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n"
        "- When done searching, continue your reasoning.\n\n"
    )
|
| 235 |
+
|
| 236 |
+
def get_multiqa_search_o1_instruction(MAX_SEARCH_LIMIT):  # the worked example performs two searches
    """Return the multi-hop QA system prompt for the search agent.

    The worked example (Alice David / Lara Croft) demonstrates chaining
    two searches; attempts are capped at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "Alice David is the voice of Lara Croft in a video game developed by which company?"\n',
        "Assistant thinking steps:\n",
        "- I need to find out who voices Lara Croft in the video game.\n",
        "- Then, I need to determine which company developed that video game.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>Alice David Lara Croft voice<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant thinks: The search results indicate that Alice David is the voice of Lara Croft in a specific video game. Now, I need to find out which company developed that game.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>video game developed by Alice David Lara Croft<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def get_multiqa_search_o1_instruction_1(MAX_SEARCH_LIMIT):  # the worked example performs two searches
    """Return the multi-hop QA prompt variant that nudges proactive searching.

    Same as ``get_multiqa_search_o1_instruction`` except for the
    sentence encouraging a search whenever the model is uncertain;
    searches are capped at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"Whenever you encounter a topic, fact, or piece of information you are uncertain about or need further details on, please perform a search to gather more accurate, up-to-date, or specific information. You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "Alice David is the voice of Lara Croft in a video game developed by which company?"\n',
        "Assistant thinking steps:\n",
        "- I need to find out who voices Lara Croft in the video game.\n",
        "- Then, I need to determine which company developed that video game.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>Alice David Lara Croft voice<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant thinks: The search results indicate that Alice David is the voice of Lara Croft in a specific video game. Now, I need to find out which company developed that game.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>video game developed by Alice David Lara Croft<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def get_multiqa_search_o1_instruction_4(MAX_SEARCH_LIMIT):
    """Return the example-free multi-hop QA prompt variant.

    Omits the worked example entirely and adds an explicit rule
    forbidding the model from emitting search-result tags itself.
    Searches are capped at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"Whenever you encounter a topic, fact, or piece of information you are uncertain about or need further details on, please perform a search to gather more accurate, up-to-date, or specific information. You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n",
        "- Do not generate <|begin_search_result|> and <|end_search_result|> tags yourself.\n\n",
    )
    return "".join(segments)
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def get_multiqa_search_o1_instruction_date(MAX_SEARCH_LIMIT):
    """Return the example-free multi-hop prompt pinned to a fixed date.

    Identical to ``get_multiqa_search_o1_instruction_4`` plus a final
    rule stating the (hard-coded) current date, December 31, 2018 —
    presumably to match a frozen document corpus. Searches are capped
    at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        f"Whenever you encounter a topic, fact, or piece of information you are uncertain about or need further details on, please perform a search to gather more accurate, up-to-date, or specific information. You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n",
        "- Do not generate <|begin_search_result|> and <|end_search_result|> tags yourself.\n",
        "- The current date is December 31, 2018.\n\n",
    )
    return "".join(segments)
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def get_multiqa_search_o1_instruction_3(MAX_SEARCH_LIMIT):  # the worked example performs two searches
    """Return the multi-hop QA prompt with an explicit query-crafting recipe.

    Adds a three-step pattern (Decomposition / Rewriting / Verification)
    for formulating search queries, followed by the two-search Alice
    David example. Searches are capped at ``MAX_SEARCH_LIMIT``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches to help ",
        "you answer the user's question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n",
        "Whenever you encounter a topic, fact, or piece of information you are uncertain about or need further details on, you should first perform a search operation and then reason based on the information returned by the search tool.\n",
        "When generating search queries based on a question, you can follow the pattern below:\n",
        "**1. Decomposition**: Break down complex questions into simpler, more specific queries and solve them step by step.\n",
        "**2. Rewriting**: When search results provide valuable information, adjust or rephrase the query to improve search relevance and effectiveness.\n",
        "**3. Verification**: Cross-check information from multiple sources to ensure accuracy.\n",
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "Alice David is the voice of Lara Croft in a video game developed by which company?"\n',
        "Assistant thinking steps:\n",
        "- I need to find out who voices Lara Croft in the video game.\n",
        "- Then, I need to determine which company developed that video game.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>Alice David Lara Croft voice<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant thinks: The search results indicate that Alice David is the voice of Lara Croft in a specific video game. Now, I need to find out which company developed that game.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>video game developed by Alice David Lara Croft<|end_search_query|>\n\n",
        "(System returns processed information from relevant web pages)\n\n",
        "Assistant continues reasoning with the new information...\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- When done searching, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def get_multiqa_search_o1_instruction_for_rag_rl(question):
    """Build the RAG-RL style User/Assistant prompt for *question*.

    Uses the <think>/<answer> tag convention and the
    <|begin_of_query|>/<|end_of_query|> search protocol (single-triple
    keyword queries only); the returned string ends mid-turn with
    "Assistant: <think>" so the model continues the thought.
    """
    return f"""The User asks a question, and the Assistant solves it.
The Assistant first thinks about the reasoning process in the mind and then provides the User with the final answer.
The output format of reasoning process and final answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., "<think> reasoning process here </think>\n\n<answer> final answer here </answer>".
During the thinking process, the Assistant can perform searching for uncertain knowledge if necessary with the format of "<|begin_of_query|> search query (only keywords) here <|end_of_query|>".
The proposed query must search for a straightforward sub-question. Furthermore, **the query must involve ONLY a single triple**.
Then, the system will provide the Assistant with helpful information with the format of "<|begin_of_documents|> ...search results... <|end_of_documents|>".\n\nUser:{question}\nAssistant: <think>"""
|
| 366 |
+
|
| 367 |
+
def get_multiqa_search_o1_instruction_2(MAX_SEARCH_LIMIT):  # the worked example performs two searches
    """Return the multi-hop QA prompt variant for knowledge-gap searching.

    Same two-search Alice David example as the other multi-QA prompts;
    this variant tells the model to search when information is missing
    from its knowledge base or stale. Searches are capped at
    ``MAX_SEARCH_LIMIT``.
    """
    return (
        "You are a reasoning assistant with the ability to perform web searches to help "
        "you answer the user's question accurately. You have special tools:\n\n"
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n"
        "Then, the system will search and analyze relevant web pages, then provide you with helpful information in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n\n"
        # Fixed typo: the original said "search the Ethernet"; a web-search
        # tool reaches the Internet (Ethernet is a LAN technology).
        f"When you encounter information that is not available in your knowledge base or need to update real-time data, you can search the Internet. You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
        "Once you have all the information you need, continue your reasoning.\n\n"
        "Example:\n"
        "Question: \"Alice David is the voice of Lara Croft in a video game developed by which company?\"\n"
        "Assistant thinking steps:\n"
        "- I need to find out who voices Lara Croft in the video game.\n"
        "- Then, I need to determine which company developed that video game.\n\n"
        "Assistant:\n"
        "<|begin_search_query|>Alice David Lara Croft voice<|end_search_query|>\n\n"
        "(System returns processed information from relevant web pages)\n\n"
        "Assistant thinks: The search results indicate that Alice David is the voice of Lara Croft in a specific video game. Now, I need to find out which company developed that game.\n\n"
        "Assistant:\n"
        "<|begin_search_query|>video game developed by Alice David Lara Croft<|end_search_query|>\n\n"
        "(System returns processed information from relevant web pages)\n\n"
        "Assistant continues reasoning with the new information...\n\n"
        "Remember:\n"
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n"
        "- When done searching, continue your reasoning.\n\n"
    )
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
def get_singleqa_rag_agent_instruction(MAX_SEARCH_LIMIT, MAX_URL_FETCH):
    """Return the single-hop RAG-agent prompt with search + URL fetching.

    Extends the search protocol with a second tool,
    <|begin_url|>/<|end_url|>, for fetching full page content of chosen
    URLs. Searches are capped at ``MAX_SEARCH_LIMIT`` and URL fetches at
    ``MAX_URL_FETCH``.
    """
    segments = (
        "You are a reasoning assistant with the ability to perform web searches and retrieve webpage content to help ",
        "you answer the user\u2019s question accurately. You have special tools:\n\n",
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n",
        "Then, the system will call the web search API with your query and return the search results to you in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n",
        " The search results will contain a list of webpages with titles, URLs, and snippets (but not full content).\n\n",
        "- After receiving the search results, if you need more detailed information from one or more specific URLs, write <|begin_url|> url1, url2, ... <|end_url|>.\n",
        " The system will fetch the full page content of those URLs and return it to you as <|begin_full_page|> ...full page content... <|end_full_page|>.\n\n",
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n",
        f"You can fetch up to {MAX_URL_FETCH} URLs for detailed information.\n\n",
        "Once you have all the information you need, continue your reasoning.\n\n",
        "Example:\n",
        'Question: "Who got the first Nobel Prize in Physics?"\n',
        "Assistant thinking steps:\n",
        "- I need to find out who was awarded the first Nobel Prize in Physics.\n\n",
        "Assistant:\n",
        "<|begin_search_query|>first Nobel Prize in Physics winner<|end_search_query|>\n\n",
        "(System returns search results)\n\n",
        "Assistant:\n",
        "<|begin_search_result|> ...search results without full page... <|end_search_result|>\n\n",
        "Assistant thinks: The search results mention several URLs. I want full details from one of them.\n\n",
        "Assistant:\n",
        "<|begin_url|>http://example.com/first_nobel_physics.html<|end_url|>\n\n",
        "(System returns full page content)\n\n",
        "Assistant:\n",
        "<|begin_full_page|> ...full page content... <|end_full_page|>\n\n",
        "Now the assistant has enough info and can continue reasoning.\n\n",
        "Remember:\n",
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n",
        "- Use <|begin_url|> to request full page content and end with <|end_url|>.\n",
        "- When done retrieving information, continue your reasoning.\n\n",
    )
    return "".join(segments)
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
def get_multiqa_rag_agent_instruction(MAX_SEARCH_LIMIT, MAX_URL_FETCH):
    """Return the system instruction for the search-enabled agent on multi-hop QA.

    The prompt teaches the model the <|begin_search_query|>/<|end_search_query|>
    and <|begin_url|>/<|end_url|> tool syntax, states the caps interpolated from
    MAX_SEARCH_LIMIT (search attempts) and MAX_URL_FETCH (URL fetches), and walks
    through a multi-step worked example that chains several searches to resolve
    a two-hop question (voice actor -> game developer).

    Args:
        MAX_SEARCH_LIMIT: Maximum number of search attempts quoted in the prompt.
        MAX_URL_FETCH: Maximum number of URL fetches quoted in the prompt.

    Returns:
        The full instruction string.
    """
    return (
        "You are a reasoning assistant with the ability to perform web searches and retrieve webpage content to help "
        "you answer the user’s question accurately. You have special tools:\n\n"
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n"
        "Then, the system will call the web search API with your query and return the search results to you in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n"
        " The search results will contain a list of webpages with titles, URLs, and snippets (but not full content).\n\n"
        "- After receiving the search results, if you need more detailed information from one or more specific URLs, write <|begin_url|> url1, url2, ... <|end_url|>.\n"
        " The system will fetch the full page content of those URLs and return it to you as <|begin_full_page|> ...full page content... <|end_full_page|>.\n\n"
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n"
        f"You can fetch up to {MAX_URL_FETCH} URLs for detailed information.\n\n"
        "Once you have all the information you need, continue your reasoning.\n\n"
        "Example:\n"
        "Question: \"Alice David is the voice of Lara Croft in a video game developed by which company?\"\n"
        "Assistant thinking steps:\n"
        "- I need to find out who voices Lara Croft in the video game.\n"
        "- Then, I need to determine which company developed that video game.\n\n"
        "Assistant:\n"
        "<|begin_search_query|>voice actor of Lara Croft<|end_search_query|>\n\n"
        "(System returns search results)\n\n"
        "Assistant:\n"
        "<|begin_search_result|> ...search results without full page... <|end_search_result|>\n\n"
        "Assistant thinks: The search results provide names of voice actors for Lara Croft. I need to confirm if Alice David is one of them.\n\n"
        "Assistant:\n"
        "<|begin_search_query|>Alice David Lara Croft voice<|end_search_query|>\n\n"
        "(System returns search results)\n\n"
        "Assistant:\n"
        "<|begin_search_result|> ...search results without full page... <|end_search_result|>\n\n"
        "Assistant thinks: The search results indicate that Alice David is the voice of Lara Croft in a specific video game. Now, I need to find out which company developed that game.\n\n"
        "Assistant:\n"
        "<|begin_search_query|>video game developed by Alice David Lara Croft<|end_search_query|>\n\n"
        "(System returns search results)\n\n"
        "Assistant:\n"
        "<|begin_search_result|> ...search results without full page... <|end_search_result|>\n\n"
        "Assistant thinks: The search results mention the company that developed the video game featuring Alice David as Lara Croft.\n\n"
        "Assistant:\n"
        "<|begin_url|>http://example.com/lara_croft_voice_actor.html, http://example.com/game_developer.html<|end_url|>\n\n"
        "(System returns full page content)\n\n"
        "Assistant:\n"
        "<|begin_full_page|> ...full page content... <|end_full_page|>\n\n"
        "Now the assistant has enough info and can continue reasoning.\n\n"
        "Remember:\n"
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n"
        "- Use <|begin_url|> to request full page content and end with <|end_url|>.\n"
        "- When done retrieving information, continue your reasoning.\n\n"
    )
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
def get_gpqa_rag_agent_instruction(MAX_SEARCH_LIMIT, MAX_URL_FETCH):
    """Return the system instruction for the search-enabled agent on GPQA-style questions.

    Identical tool protocol to the other *_rag_agent_instruction prompts
    (<|begin_search_query|>, <|begin_url|>, etc.), with a physics worked
    example (pp III neutrinos). MAX_SEARCH_LIMIT and MAX_URL_FETCH are
    interpolated as the quoted tool-use caps.

    Args:
        MAX_SEARCH_LIMIT: Maximum number of search attempts quoted in the prompt.
        MAX_URL_FETCH: Maximum number of URL fetches quoted in the prompt.

    Returns:
        The full instruction string.
    """
    return (
        "You are a reasoning assistant with the ability to perform web searches and retrieve webpage content to help "
        "you answer the user’s question accurately. You have special tools:\n\n"
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n"
        "Then, the system will call the web search API with your query and return the search results to you in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n"
        " The search results will contain a list of webpages with titles, URLs, and snippets (but not full content).\n\n"
        "- After receiving the search results, if you need more detailed information from one or more specific URLs, write <|begin_url|> url1, url2, ... <|end_url|>.\n"
        " The system will fetch the full page content of those URLs and return it to you as <|begin_full_page|> ...full page content... <|end_full_page|>.\n\n"
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n"
        f"You can fetch up to {MAX_URL_FETCH} URLs for detailed information.\n\n"
        "Once you have all the information you need, continue your reasoning.\n\n"
        "Example:\n"
        "Question: \"What is the energy range of pp III neutrinos?\"\n"
        "Assistant thinking steps:\n"
        "- I might need to look up details about pp III neutrinos.\n\n"
        "Assistant:\n"
        "<|begin_search_query|>pp III neutrino energy spectrum<|end_search_query|>\n\n"
        "(System returns search results)\n\n"
        "Assistant:\n"
        "<|begin_search_result|> ...search results without full page... <|end_search_result|>\n\n"
        "Assistant thinks: The search results mention some URLs. I want full details from one of them.\n\n"
        "Assistant:\n"
        "<|begin_url|>http://example.com/ppIII_neutrino.html<|end_url|>\n\n"
        "(System returns full page content)\n\n"
        "Assistant:\n"
        "<|begin_full_page|> ...full page content... <|end_full_page|>\n\n"
        "Now the assistant has enough info and can continue reasoning.\n\n"
        "Remember:\n"
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n"
        "- Use <|begin_url|> to request full page content and end with <|end_url|>.\n"
        "- When done retrieving information, continue your reasoning.\n\n"
    )
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def get_math_rag_agent_instruction(MAX_SEARCH_LIMIT, MAX_URL_FETCH):
    """Return the system instruction for the search-enabled agent on math questions.

    Same tool protocol as the other *_rag_agent_instruction prompts; the
    worked example is an integration question. MAX_SEARCH_LIMIT and
    MAX_URL_FETCH are interpolated as the quoted tool-use caps.

    Args:
        MAX_SEARCH_LIMIT: Maximum number of search attempts quoted in the prompt.
        MAX_URL_FETCH: Maximum number of URL fetches quoted in the prompt.

    Returns:
        The full instruction string.
    """
    return (
        "You are a reasoning assistant with the ability to perform web searches and retrieve webpage content to help "
        "you answer the user’s math-related question accurately. You have special tools:\n\n"
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n"
        "Then, the system will call the web search API with your query and return the search results to you in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n"
        " The search results will contain a list of webpages with titles, URLs, and snippets (but not full content).\n\n"
        "- After receiving the search results, if you need more detailed information from one or more specific URLs, write <|begin_url|> url1, url2, ... <|end_url|>.\n"
        " The system will fetch the full page content of those URLs and return it to you as <|begin_full_page|> ...full page content... <|end_full_page|>.\n\n"
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n"
        f"You can fetch up to {MAX_URL_FETCH} URLs for detailed information.\n\n"
        "Once you have all the information you need, continue your reasoning.\n\n"
        "Example:\n"
        "Question: \"How do you compute the integral of e^(x^2) dx?\"\n"
        "Assistant thinking steps:\n"
        "- I might need to look up techniques for integrating e^(x^2).\n\n"
        "Assistant:\n"
        "<|begin_search_query|>methods to integrate e^(x^2)<|end_search_query|>\n\n"
        "(System returns search results)\n\n"
        "Assistant:\n"
        "<|begin_search_result|> ...search results without full page... <|end_search_result|>\n\n"
        "Assistant thinks: The search results mention some URLs. I want full details from one of them.\n\n"
        "Assistant:\n"
        "<|begin_url|>http://example.com/integration_e_x_squared.html<|end_url|>\n\n"
        "(System returns full page content)\n\n"
        "Assistant:\n"
        "<|begin_full_page|> ...full page content... <|end_full_page|>\n\n"
        "Now the assistant has enough info and can continue reasoning.\n\n"
        "Remember:\n"
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n"
        "- Use <|begin_url|> to request full page content and end with <|end_url|>.\n"
        "- When done retrieving information, continue your reasoning.\n\n"
    )
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def get_code_rag_agent_instruction(MAX_SEARCH_LIMIT, MAX_URL_FETCH):
    """Return the system instruction for the search-enabled agent on programming questions.

    Same tool protocol as the other *_rag_agent_instruction prompts; the
    worked example looks up a binary-search implementation. MAX_SEARCH_LIMIT
    and MAX_URL_FETCH are interpolated as the quoted tool-use caps.

    Args:
        MAX_SEARCH_LIMIT: Maximum number of search attempts quoted in the prompt.
        MAX_URL_FETCH: Maximum number of URL fetches quoted in the prompt.

    Returns:
        The full instruction string.
    """
    return (
        "You are a reasoning assistant with the ability to perform web searches and retrieve webpage content to help "
        "you answer the user’s programming-related question accurately. You have special tools:\n\n"
        "- To perform a search: write <|begin_search_query|> your query here <|end_search_query|>.\n"
        "Then, the system will call the web search API with your query and return the search results to you in the format <|begin_search_result|> ...search results... <|end_search_result|>.\n"
        " The search results will contain a list of webpages with titles, URLs, and snippets (but not full content).\n\n"
        "- After receiving the search results, if you need more detailed information from one or more specific URLs, write <|begin_url|> url1, url2, ... <|end_url|>.\n"
        " The system will fetch the full page content of those URLs and return it to you as <|begin_full_page|> ...full page content... <|end_full_page|>.\n\n"
        f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n"
        f"You can fetch up to {MAX_URL_FETCH} URLs for detailed information.\n\n"
        "Once you have all the information you need, continue your reasoning.\n\n"
        "Example:\n"
        "Question: \"How do I implement a binary search algorithm in Python?\"\n"
        "Assistant thinking steps:\n"
        "- I might need to look up the implementation details of binary search in Python.\n\n"
        "Assistant:\n"
        "<|begin_search_query|>binary search algorithm implementation in Python<|end_search_query|>\n\n"
        "(System returns search results)\n\n"
        "Assistant:\n"
        "<|begin_search_result|> ...search results without full page... <|end_search_result|>\n\n"
        "Assistant thinks: The search results mention some URLs. I want full details from one of them.\n\n"
        "Assistant:\n"
        "<|begin_url|>http://example.com/python_binary_search.html<|end_url|>\n\n"
        "(System returns full page content)\n\n"
        "Assistant:\n"
        "<|begin_full_page|> ...full page content... <|end_full_page|>\n\n"
        "Now the assistant has enough info and can continue reasoning.\n\n"
        "Remember:\n"
        "- Use <|begin_search_query|> to request a web search and end with <|end_search_query|>.\n"
        "- Use <|begin_url|> to request full page content and end with <|end_url|>.\n"
        "- When done retrieving information, continue your reasoning.\n\n"
    )
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
def get_naive_rag_instruction(question, documents):
    """Build a naive-RAG prompt that inlines the question and retrieved documents.

    Args:
        question: Question text inserted verbatim.
        documents: Pre-formatted retrieved documents inserted verbatim.

    Returns:
        The assembled prompt string.
    """
    segments = [
        "You are a knowledgeable assistant that uses the provided documents to answer the user's question.\n\n",
        f"Question:\n{question}\n",
        f"Documents:\n{documents}\n",
    ]
    return "".join(segments)
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
def get_task_instruction_openqa(question, model_name=None):
    """Build the user prompt for an open-domain QA task.

    Args:
        question: Question text inserted verbatim.
        model_name: 'qwq' selects the short QwQ wording; any other value
            (including None) selects the explicit step-by-step wording.

    Returns:
        The full prompt string, ending with the question block.
    """
    if model_name == 'qwq':
        preamble = (
            'Please answer the following question. '
            'You should provide your final answer in the format \\boxed{YOUR_ANSWER}.\n\n'
        )
    else:
        preamble = (
            'Please answer the following question. You should think step by step to solve it.\n\n'
            'Provide your final answer in the format \\boxed{YOUR_ANSWER}.\n\n'
        )
    # Shared suffix: the question block is identical in both variants.
    return preamble + f'Question:\n{question}\n\n'
|
| 607 |
+
|
| 608 |
+
def get_task_instruction_math(question, model_name=None):
    """Build the user prompt for a math task.

    Args:
        question: Question text inserted verbatim.
        model_name: 'qwq' selects the "reason step by step ... within \\boxed{}"
            wording; any other value selects the \\boxed{YOUR_ANSWER} wording.

    Returns:
        The full prompt string, ending with the question block.
    """
    if model_name == 'qwq':
        preamble = (
            'Please answer the following math question. '
            'Please reason step by step, and put your final answer within \\boxed{}.\n\n'
        )
    else:
        preamble = (
            'Please answer the following math question. You should think step by step to solve it.\n\n'
            'Provide your final answer in the format \\boxed{YOUR_ANSWER}.\n\n'
        )
    # Shared suffix: the question block is identical in both variants.
    return preamble + f'Question:\n{question}\n\n'
|
| 627 |
+
|
| 628 |
+
def get_task_instruction_multi_choice(question, model_name=None):
    """Build the user prompt for a multiple-choice task.

    Args:
        question: Question text (with choices) inserted verbatim.
        model_name: 'qwq' selects the short wording; 'llama' additionally
            demands a bare letter A-D as the final choice; anything else
            selects the generic step-by-step wording.

    Returns:
        The full prompt string, ending with the question block.
    """
    if model_name == 'qwq':
        preamble = (
            'Please answer the following multiple-choice question. '
            'You should provide your final choice in the format \\boxed{YOUR_CHOICE}.\n\n'
        )
    elif model_name == 'llama':
        preamble = (
            'Please answer the following multiple-choice question. You should think step by step to solve it.\n\n'
            'Provide your final choice in the format \\boxed{YOUR_CHOICE}. Your final choice should be one of the letters A, B, C, or D, DO NOT include any answer content.\n\n'
        )
    else:
        preamble = (
            'Please answer the following multiple-choice question. You should think step by step to solve it.\n\n'
            'Provide your final choice in the format \\boxed{YOUR_CHOICE}.\n\n'
        )
    # Shared suffix: the question block is identical in all three variants.
    return preamble + f'Question:\n{question}\n\n'
|
| 648 |
+
|
| 649 |
+
def get_task_instruction_code(question, question_title=None, model_name=None):
    """Build the user prompt for a code-generation task.

    Args:
        question: Problem statement inserted verbatim.
        question_title: Problem title; only used in the 'qwq' variant.
        model_name: 'qwq' selects the title+statement wording; any other
            value selects the stdin/stdout wording.

    Returns:
        The full prompt string.
    """
    if model_name == 'qwq':
        pieces = [
            'Generate a correct Python program that passes all tests for the given problem. ',
            'You should provide your final code within a Python code block using triple backticks (```python\n',
            'YOUR_CODE\n',
            '```).\n\n',
            f'Problem Title: {question_title}\n\n',
            f'Problem Statement:\n{question}\n\n',
        ]
    else:
        pieces = [
            'You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. ',
            f'You should think step by step to solve it.\n\nQuestion:\n{question}\n\n',
            'Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows.\n\n',
            '```python\n# YOUR CODE HERE\n```\n\n',
        ]
    return ''.join(pieces)
|
| 667 |
+
|
deep_search/search_o1/scripts/reason_one_model.py
ADDED
|
@@ -0,0 +1,915 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# run_search_o1.py
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
import re
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import string
|
| 10 |
+
from typing import Optional, Tuple, List, Dict
|
| 11 |
+
import argparse
|
| 12 |
+
|
| 13 |
+
from transformers import AutoTokenizer
|
| 14 |
+
from vllm import LLM, SamplingParams
|
| 15 |
+
|
| 16 |
+
from bing_search import (
|
| 17 |
+
bing_web_search,
|
| 18 |
+
extract_relevant_info,
|
| 19 |
+
fetch_page_content,
|
| 20 |
+
extract_snippet_with_context
|
| 21 |
+
)
|
| 22 |
+
from evaluate import (
|
| 23 |
+
run_evaluation,
|
| 24 |
+
extract_answer
|
| 25 |
+
)
|
| 26 |
+
from prompts import (
|
| 27 |
+
get_gpqa_search_o1_instruction,
|
| 28 |
+
get_math_search_o1_instruction,
|
| 29 |
+
get_code_search_o1_instruction,
|
| 30 |
+
get_singleqa_search_o1_instruction,
|
| 31 |
+
get_multiqa_search_o1_instruction,
|
| 32 |
+
get_webpage_to_reasonchain_instruction,
|
| 33 |
+
get_task_instruction_openqa,
|
| 34 |
+
get_task_instruction_math,
|
| 35 |
+
get_task_instruction_multi_choice,
|
| 36 |
+
get_task_instruction_code,
|
| 37 |
+
get_singleqa_search_o1_instruction_1,
|
| 38 |
+
get_multiqa_search_o1_instruction_1,
|
| 39 |
+
get_webpage_to_reasonchain_instruction_1,
|
| 40 |
+
get_math_search_o1_instruction_1,
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
from openai import OpenAI
|
| 45 |
+
|
| 46 |
+
from add_eval import add_eval
|
| 47 |
+
# Modify OpenAI's API key and API base to use vLLM's API server.
|
| 48 |
+
# 使用 vLLM 的 API 服务器需要修改 OpenAI 的 API 密钥和 API 库。
|
| 49 |
+
|
| 50 |
+
# openai_api_key = "EMPTY"
|
| 51 |
+
# openai_api_base = "http://localhost:8000/v1"
|
| 52 |
+
# client = OpenAI(
|
| 53 |
+
# api_key=openai_api_key,
|
| 54 |
+
# base_url=openai_api_base,
|
| 55 |
+
# )
|
| 56 |
+
|
| 57 |
+
# Define special tokens
# Literal delimiter strings matching the markers used in the agent prompts:
# the model wraps a search request in BEGIN/END_SEARCH_QUERY, and the search
# results handed back to it are wrapped in BEGIN/END_SEARCH_RESULT.
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# os.environ['http_proxy'] = 'http://127.0.0.1:7880'
|
| 65 |
+
# os.environ['https_proxy'] = 'http://127.0.0.1:7880'
|
| 66 |
+
|
| 67 |
+
# 增加了cache共享和has answer评测,truncate doc
|
| 68 |
+
|
| 69 |
+
def parse_args():
    """Parse command-line arguments for a Search-O1 reasoning run.

    Covers dataset/split selection, search and retrieval limits, the model
    path, vLLM sampling parameters, Bing Search API credentials, and the
    cache/output directories.

    Returns:
        argparse.Namespace with the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Run Search O1 for various datasets and models.")

    # Dataset and split configuration
    parser.add_argument(
        '--dataset_name',
        type=str,
        required=True,
        choices=['gpqa', 'math500', 'aime', 'amc', 'livecode', 'nq', 'triviaqa', 'hotpotqa', '2wiki', 'musique', 'bamboogle'],
        help="Name of the dataset to use."
    )

    parser.add_argument(
        '--split',
        type=str,
        required=True,
        choices=['test', 'diamond', 'main', 'extended'],
        help="Dataset split to use."
    )

    parser.add_argument(
        '--subset_num',
        type=int,
        default=-1,
        help="Number of examples to process. Defaults to all if not specified."
    )

    # Search and document retrieval configuration
    # NOTE: main() overrides max_search_limit/max_turn/top_k/max_doc_len for
    # the QA datasets, so these defaults only apply to the other benchmarks.
    parser.add_argument(
        '--max_search_limit',
        type=int,
        default=10,
        help="Maximum number of searches per question."
    )

    parser.add_argument(
        '--max_turn',
        type=int,
        default=15,
        help="Maximum number of turns."
    )

    parser.add_argument(  # Maximum number of documents returned when using the search engine
        '--top_k',
        type=int,
        default=10,
        help="Maximum number of search documents to return."
    )

    parser.add_argument(
        '--max_doc_len',
        type=int,
        default=3000,
        help="Maximum length of each searched document."
    )

    # parser.add_argument(
    #     '--use_jina',
    #     type=bool,
    #     default=False,
    #     help="Whether to use Jina API for document fetching."
    # )
    # NOTE(review): main() currently hard-codes use_jina = False after parsing,
    # so this flag has no effect there — confirm before relying on it.
    parser.add_argument(
        '--use_jina',
        action='store_true',
        help="Whether to use Jina API for document fetching."
    )

    # NOTE: the default is the *string* 'None'; main() converts it to a real None.
    parser.add_argument(
        '--jina_api_key',
        type=str,
        default='None',
        help="Your Jina API Key to Fetch URL Content."
    )

    # Model configuration
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help="Path to the pre-trained model."
    )

    # Sampling parameters
    parser.add_argument(
        '--temperature',
        type=float,
        default=0.7,
        help="Sampling temperature."
    )

    parser.add_argument(
        '--top_p',
        type=float,
        default=0.8,
        help="Top-p sampling parameter."
    )

    parser.add_argument(
        '--top_k_sampling',
        type=int,
        default=20,
        help="Top-k sampling parameter."
    )

    # None means "pick per model": main() uses 1.05 for QwQ models, else 1.0.
    parser.add_argument(
        '--repetition_penalty',
        type=float,
        default=None,
        help="Repetition penalty. If not set, defaults based on the model."
    )

    parser.add_argument(
        '--max_tokens',
        type=int,
        default=32768,
        help="Maximum number of tokens to generate. If not set, defaults based on the model and dataset."
    )

    # Bing API Configuration
    parser.add_argument(
        '--bing_subscription_key',
        type=str,
        required=True,
        help="Bing Search API subscription key."
    )

    parser.add_argument(
        '--bing_endpoint',
        type=str,
        default="https://api.bing.microsoft.com/v7.0/search",
        help="Bing Search API endpoint."
    )

    parser.add_argument(
        '--cache_dir_base',
        type=str,
        required=True,
        help="cache path."
    )

    parser.add_argument(
        '--output_dir_base',
        type=str,
        required=True,
        help="output_dir"
    )

    # parser.add_argument(
    #     '--model_doc_reason_path',
    #     type=str,
    #     required=True,
    #     help="Path to the document reasoning model."
    # )

    # openai_api_base
    # parser.add_argument(
    #     '--openai_api_base',
    #     type=str,
    #     required=True,
    #     help="openai_api_base"
    # )
    # parser.add_argument(
    #     '--data_path',
    #     type=str,
    #     required=True,
    #     help="Path to the document reasoning model."
    # )
    return parser.parse_args()
|
| 238 |
+
|
| 239 |
+
def main():
|
| 240 |
+
args = parse_args()
|
| 241 |
+
print(f"args.use_jina: {args.use_jina}")
|
| 242 |
+
# Extract arguments
|
| 243 |
+
dataset_name = args.dataset_name
|
| 244 |
+
split = args.split
|
| 245 |
+
subset_num = args.subset_num
|
| 246 |
+
MAX_SEARCH_LIMIT = args.max_search_limit
|
| 247 |
+
MAX_TURN = args.max_turn
|
| 248 |
+
top_k = args.top_k
|
| 249 |
+
max_doc_len = args.max_doc_len
|
| 250 |
+
model_path = args.model_path
|
| 251 |
+
# model_doc_reason_path = args.model_doc_reason_path
|
| 252 |
+
temperature = args.temperature
|
| 253 |
+
top_p = args.top_p
|
| 254 |
+
top_k_sampling = args.top_k_sampling
|
| 255 |
+
repetition_penalty = args.repetition_penalty
|
| 256 |
+
max_tokens = args.max_tokens
|
| 257 |
+
bing_subscription_key = args.bing_subscription_key
|
| 258 |
+
bing_endpoint = args.bing_endpoint
|
| 259 |
+
use_jina = args.use_jina
|
| 260 |
+
jina_api_key = args.jina_api_key
|
| 261 |
+
cache_dir_base = args.cache_dir_base
|
| 262 |
+
output_dir_base = args.output_dir_base
|
| 263 |
+
# openai_api_base = args.openai_api_base
|
| 264 |
+
use_jina = False
|
| 265 |
+
print(f"use_jina: {use_jina}")
|
| 266 |
+
|
| 267 |
+
print(f"CUDA_VISIBLE_DEVICES is set to: {os.environ['CUDA_VISIBLE_DEVICES']}")
|
| 268 |
+
|
| 269 |
+
# openai_api_key = "EMPTY"
|
| 270 |
+
# openai_api_base = openai_api_base
|
| 271 |
+
# client = OpenAI(
|
| 272 |
+
# api_key=openai_api_key,
|
| 273 |
+
# base_url=openai_api_base,
|
| 274 |
+
# )
|
| 275 |
+
|
| 276 |
+
# Adjust parameters based on dataset
|
| 277 |
+
if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki', 'medmcqa', 'pubhealth']:
|
| 278 |
+
MAX_SEARCH_LIMIT = 5
|
| 279 |
+
if dataset_name in ['hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 280 |
+
MAX_SEARCH_LIMIT = 10
|
| 281 |
+
MAX_TURN = 15
|
| 282 |
+
top_k = 10
|
| 283 |
+
max_doc_len = 3000
|
| 284 |
+
|
| 285 |
+
if args.jina_api_key == 'None':
|
| 286 |
+
jina_api_key = None
|
| 287 |
+
|
| 288 |
+
# Set default repetition_penalty if not provided
|
| 289 |
+
if repetition_penalty is None:
|
| 290 |
+
repetition_penalty = 1.05 if 'qwq' in model_path.lower() else 1.0
|
| 291 |
+
|
| 292 |
+
# Data paths based on dataset
|
| 293 |
+
if split == "test": # 测试用的数据集地址
|
| 294 |
+
data_path = f"./data/test/{dataset_name}.json"
|
| 295 |
+
else: # 训练用的数据集地址
|
| 296 |
+
if dataset_name == 'livecode':
|
| 297 |
+
data_path = f'./data/LiveCodeBench/{split}.json'
|
| 298 |
+
elif dataset_name in ['math500', 'gpqa', 'aime', 'amc']:
|
| 299 |
+
data_path = f'./data/{dataset_name.upper()}/{split}.json'
|
| 300 |
+
else:
|
| 301 |
+
data_path = f'./data/QA_Datasets/{dataset_name}.json'
|
| 302 |
+
|
| 303 |
+
print('-----------------------')
|
| 304 |
+
print(f'Using {dataset_name} {split} set.')
|
| 305 |
+
print('-----------------------')
|
| 306 |
+
|
| 307 |
+
# ---------------------- Caching Mechanism ----------------------
|
| 308 |
+
# Define cache directories and file paths
|
| 309 |
+
# cache_dir = './cache'
|
| 310 |
+
model_name = model_path.split('/')[-1].replace('-instruct', '')
|
| 311 |
+
# cache_dir = f'./{cache_dir_base}_{dataset_name}_{model_name}'
|
| 312 |
+
cache_dir = cache_dir_base
|
| 313 |
+
search_cache_path = os.path.join(cache_dir, 'search_cache.json')
|
| 314 |
+
url_cache_path = os.path.join(cache_dir, 'url_cache.json')
|
| 315 |
+
|
| 316 |
+
# Ensure cache directory exists
|
| 317 |
+
os.makedirs(cache_dir, exist_ok=True)
|
| 318 |
+
|
| 319 |
+
# Load existing caches or initialize empty dictionaries
|
| 320 |
+
if os.path.exists(search_cache_path):
|
| 321 |
+
with open(search_cache_path, 'r', encoding='utf-8') as f:
|
| 322 |
+
search_cache = json.load(f)
|
| 323 |
+
else:
|
| 324 |
+
search_cache = {}
|
| 325 |
+
|
| 326 |
+
if os.path.exists(url_cache_path):
|
| 327 |
+
with open(url_cache_path, 'r', encoding='utf-8') as f:
|
| 328 |
+
url_cache = json.load(f)
|
| 329 |
+
else:
|
| 330 |
+
url_cache = {}
|
| 331 |
+
|
| 332 |
+
# Function to save caches
|
| 333 |
+
def save_caches():
    """Persist the in-memory search and URL caches to their JSON files.

    Writes ``search_cache`` / ``url_cache`` (enclosing scope) to
    ``search_cache_path`` / ``url_cache_path`` as pretty-printed UTF-8 JSON.
    """
    # Pair each cache with its destination so both files are written identically.
    for cache_path, cache_obj in (
        (search_cache_path, search_cache),
        (url_cache_path, url_cache),
    ):
        with open(cache_path, 'w', encoding='utf-8') as fh:
            json.dump(cache_obj, fh, ensure_ascii=False, indent=2)
|
| 338 |
+
|
| 339 |
+
# ---------------------- Model Loading ----------------------
|
| 340 |
+
print(f"Loading tokenizer from {model_path}...")
|
| 341 |
+
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
| 342 |
+
if tokenizer.pad_token is None:
|
| 343 |
+
tokenizer.pad_token = tokenizer.eos_token
|
| 344 |
+
tokenizer.padding_side = 'left' # 主要是左���充
|
| 345 |
+
print("Tokenizer loaded successfully.")
|
| 346 |
+
|
| 347 |
+
# Define output directory based on model and dataset
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
# if 'qwq' in model_path.lower():
|
| 351 |
+
# if dataset_name in ['math500', 'gpqa', 'aime', 'amc', 'livecode']:
|
| 352 |
+
# output_dir = f'./{output_dir_base}/{dataset_name}.qwq.search_o1'
|
| 353 |
+
# if dataset_name == 'gpqa' and (MAX_SEARCH_LIMIT != 5 or top_k != 10):
|
| 354 |
+
# output_dir = f'./{output_dir_base}/runs.analysis/{dataset_name}.qwq.search_o1.{MAX_SEARCH_LIMIT}.{top_k}'
|
| 355 |
+
# else:
|
| 356 |
+
# output_dir = f'./{output_dir_base}/runs.qa/{dataset_name}.qwq.search_o1'
|
| 357 |
+
# else:
|
| 358 |
+
# model_short_name = model_path.split('/')[-1].lower().replace('-instruct', '')
|
| 359 |
+
# output_dir = f'./{output_dir_base}/runs.baselines/{dataset_name}.{model_short_name}.search_o1'
|
| 360 |
+
output_dir = output_dir_base
|
| 361 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 362 |
+
|
| 363 |
+
print(f"Loading model from {model_path}...")
|
| 364 |
+
print(f"device_count: {torch.cuda.device_count()}")
|
| 365 |
+
|
| 366 |
+
# Initialize the LLM
|
| 367 |
+
llm = LLM(
|
| 368 |
+
model=model_path,
|
| 369 |
+
tensor_parallel_size=torch.cuda.device_count(),
|
| 370 |
+
gpu_memory_utilization=0.95,
|
| 371 |
+
|
| 372 |
+
)
|
| 373 |
+
print("Model loaded successfully.")
|
| 374 |
+
|
| 375 |
+
# # ----------------------Loading model to reason in document ----------------------
|
| 376 |
+
|
| 377 |
+
# print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
|
| 378 |
+
# tokenizer_doc_reason = AutoTokenizer.from_pretrained(model_doc_reason_path, trust_remote_code=True)
|
| 379 |
+
# if tokenizer_doc_reason.pad_token is None:
|
| 380 |
+
# tokenizer_doc_reason.pad_token = tokenizer_doc_reason.eos_token
|
| 381 |
+
# tokenizer_doc_reason.padding_side = 'left' # 主要是左填充
|
| 382 |
+
# print("tokenizer_doc_reason loaded successfully.")
|
| 383 |
+
|
| 384 |
+
# print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
|
| 385 |
+
|
| 386 |
+
# # Initialize the LLM
|
| 387 |
+
# # torch.cuda.set_device(6,7)
|
| 388 |
+
|
| 389 |
+
# llm_doc_reason = LLM(
|
| 390 |
+
# model=model_doc_reason_path,
|
| 391 |
+
# tensor_parallel_size=2,
|
| 392 |
+
# gpu_memory_utilization=0.95,
|
| 393 |
+
|
| 394 |
+
# )
|
| 395 |
+
# print("Model_doc_reason loaded successfully.")
|
| 396 |
+
|
| 397 |
+
# ---------------------- Data Loading ----------------------
|
| 398 |
+
print(f"Loading data from {data_path}...")
|
| 399 |
+
with open(data_path, 'r', encoding='utf-8') as json_file:
|
| 400 |
+
filtered_data = json.load(json_file)
|
| 401 |
+
print(f"Data loaded successfully. Total examples: {len(filtered_data)}")
|
| 402 |
+
|
| 403 |
+
# ---------------------- Batch Generation Function ----------------------
|
| 404 |
+
def generate_webpage_to_reasonchain_batch( # run the model over fetched web pages to produce reasoning, then extract the answer from each reply
    original_questions: List[str],
    prev_reasonings: List[str],
    search_queries: List[str],
    documents: List[str],
    dataset_name: str,
    batch_output_records: List[Dict], # New parameter to collect outputs
    max_tokens: int = 32768,
    coherent: bool = False,
) -> List[str]:
    """Batch-analyze fetched web documents with the reasoning LLM.

    For each (prev_reasoning, search_query, document) triple, builds an
    instruction prompt, generates a completion with the enclosing-scope
    ``llm``, and extracts the distilled information via
    ``extract_answer(..., mode='infogen')``.

    Side effects: appends one record per item (prompt, raw output,
    extracted info) to ``batch_output_records`` (mutated in place).
    Uses ``model_path``, ``tokenizer`` and ``llm`` from the enclosing scope.

    NOTE(review): ``original_questions``, ``dataset_name`` and ``coherent``
    are accepted but never used in this body — possibly kept for interface
    compatibility; confirm before removing.

    Returns:
        One extracted-info string (or whatever ``extract_answer`` yields)
        per input document, in input order.
    """

    # Qwen2.5 models get a smaller generation budget than the default.
    if "Qwen2.5" in model_path:
        max_tokens = 8192

    # Truncate each document to at most 20000 tokens, then decode back to text
    # so the prompt builder receives bounded-length strings.
    encode_docs = tokenizer(documents, truncation=True, max_length=20000, add_special_tokens=False)["input_ids"]
    documents = tokenizer.batch_decode(encode_docs)

    # Record the (post-truncation) token length of every document.
    doc_lengths = [len(doc) for doc in encode_docs]

    # # Print each document's length (kept for debugging)
    # for i, length in enumerate(doc_lengths):
    #     print(f"Document {i + 1}: {length} tokens")

    # doc_lengths can be returned directly if a length list is ever needed
    print(f"for {model_path}, set max_tokens={max_tokens} for doc gen, truncate documnets. ")
    print("All document lengths:", doc_lengths)

    user_prompts = [ # build the user prompt from the prior reasoning, the search query and the fetched docs
        get_webpage_to_reasonchain_instruction(r, sq, doc)
        for r, sq, doc in zip(prev_reasonings, search_queries, documents)
    ]

    # Wrap each prompt as a single-turn chat and render it with the chat template.
    prompts = [{"role": "user", "content": up} for up in user_prompts]
    prompts = [tokenizer.apply_chat_template([p], tokenize=False, add_generation_prompt=True) for p in prompts]

    output = llm.generate( # generate the model replies (sampling params fixed for this info-gen step)
        prompts,
        sampling_params=SamplingParams(
            max_tokens=max_tokens,
            temperature=0.7,
            top_p=0.8,
            top_k=20,
            repetition_penalty=1.05,
        )
    )

    raw_outputs = [out.outputs[0].text for out in output]
    extracted_infos = [extract_answer(raw, mode='infogen') for raw in raw_outputs] # pull the distilled web-page reasoning out of each reply

    # Record prompt / raw output / extracted info for later inspection.
    for i, (p, r, e) in enumerate(zip(prompts, raw_outputs, extracted_infos)):
        batch_output_records.append({
            'prompt': p,
            'raw_output': r,
            'extracted_info': e
        })

    return extracted_infos
|
| 462 |
+
|
| 463 |
+
# ---------------------- Preparation of Input Prompts ----------------------
|
| 464 |
+
input_list = []
|
| 465 |
+
for item in filtered_data: # 生成prompts
|
| 466 |
+
question = item['Question']
|
| 467 |
+
|
| 468 |
+
if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 469 |
+
if dataset_name in ['nq', 'triviaqa']:
|
| 470 |
+
instruction = get_singleqa_search_o1_instruction_1(MAX_SEARCH_LIMIT)
|
| 471 |
+
elif dataset_name in ['hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 472 |
+
instruction = get_multiqa_search_o1_instruction_1(MAX_SEARCH_LIMIT)
|
| 473 |
+
if 'qwq' in model_path.lower():
|
| 474 |
+
user_prompt = get_task_instruction_openqa(question, model_name='qwq')
|
| 475 |
+
else:
|
| 476 |
+
user_prompt = get_task_instruction_openqa(question)
|
| 477 |
+
|
| 478 |
+
elif dataset_name in ['math500', 'aime', 'amc']:
|
| 479 |
+
instruction = get_math_search_o1_instruction_1(MAX_SEARCH_LIMIT)
|
| 480 |
+
if 'qwq' in model_path.lower():
|
| 481 |
+
user_prompt = get_task_instruction_math(question, model_name='qwq')
|
| 482 |
+
else:
|
| 483 |
+
user_prompt = get_task_instruction_math(question)
|
| 484 |
+
|
| 485 |
+
elif dataset_name == 'gpqa':
|
| 486 |
+
instruction = get_gpqa_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 487 |
+
if 'qwq' in model_path.lower():
|
| 488 |
+
user_prompt = get_task_instruction_multi_choice(question, model_name='qwq')
|
| 489 |
+
elif 'llama' in model_path.lower():
|
| 490 |
+
user_prompt = get_task_instruction_multi_choice(question, model_name='llama')
|
| 491 |
+
else:
|
| 492 |
+
user_prompt = get_task_instruction_multi_choice(question)
|
| 493 |
+
|
| 494 |
+
elif dataset_name == 'livecode':
|
| 495 |
+
instruction = get_code_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 496 |
+
question_title = item.get('question_title', '')
|
| 497 |
+
if 'qwq' in model_path.lower():
|
| 498 |
+
user_prompt = get_task_instruction_code(question, question_title=question_title, model_name='qwq')
|
| 499 |
+
else:
|
| 500 |
+
user_prompt = get_task_instruction_code(question)
|
| 501 |
+
else:
|
| 502 |
+
user_prompt = "" # Default to empty if dataset not matched
|
| 503 |
+
|
| 504 |
+
prompt = [{"role": "user", "content": instruction + user_prompt}] # instruction是告诉模型怎么进行搜索,user_prompt是用户具体问题
|
| 505 |
+
prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
|
| 506 |
+
input_list.append(prompt)
|
| 507 |
+
|
| 508 |
+
if subset_num != -1:
|
| 509 |
+
input_list = input_list[:subset_num]
|
| 510 |
+
filtered_data = filtered_data[:subset_num]
|
| 511 |
+
|
| 512 |
+
# Initialize active sequences
|
| 513 |
+
active_sequences = [{ # 记录每个问题的搜索历史
|
| 514 |
+
'item': item,
|
| 515 |
+
'prompt': prompt,
|
| 516 |
+
'output': '',
|
| 517 |
+
'finished': False, # 一开始均为未完成
|
| 518 |
+
'history': [],
|
| 519 |
+
'search_count': 0,
|
| 520 |
+
'executed_search_queries': set(),
|
| 521 |
+
'all_info': [],
|
| 522 |
+
} for item, prompt in zip(filtered_data, input_list)]
|
| 523 |
+
|
| 524 |
+
# ---------------------- Set Max Tokens ----------------------
|
| 525 |
+
# if 'qwq' in model_path.lower():
|
| 526 |
+
# if dataset_name in ['aime', 'amc', 'livecode']:
|
| 527 |
+
# max_tokens = 32768
|
| 528 |
+
# else:
|
| 529 |
+
# max_tokens = 20480
|
| 530 |
+
# else:
|
| 531 |
+
# max_tokens = 8192
|
| 532 |
+
# max_tokens = 16384
|
| 533 |
+
if dataset_name in ['aime', 'amc', 'livecode']:
|
| 534 |
+
max_tokens = 32768
|
| 535 |
+
else:
|
| 536 |
+
max_tokens = 20480
|
| 537 |
+
# ---------------------- Generation Function ----------------------
|
| 538 |
+
def run_generation(sequences: List[Dict], max_tokens: int) -> List:
    """Run one vLLM generation pass over the prompts of the given sequences.

    Sampling hyper-parameters (``temperature``, ``top_p``, ``top_k_sampling``,
    ``repetition_penalty``) and ``llm`` / ``tokenizer`` / ``END_SEARCH_QUERY``
    come from the enclosing scope. Generation stops at the end-of-search-query
    marker or EOS, and the stop string is kept in the output so the caller can
    detect a pending search request.

    Returns the raw vLLM outputs, one per input sequence, in order.
    """
    batch_prompts = [entry['prompt'] for entry in sequences]
    params = SamplingParams(
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k_sampling,
        repetition_penalty=repetition_penalty,
        stop=[END_SEARCH_QUERY, tokenizer.eos_token],
        include_stop_str_in_output=True,
    )
    output_list = llm.generate(batch_prompts, sampling_params=params)
    print(f"run_generation completed {len(output_list)}")
    return output_list
|
| 552 |
+
|
| 553 |
+
# Function to extract text between two tags 提取位于 start_tag 和 end_tag 之间的内容
|
| 554 |
+
def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
    """Return the stripped content of the LAST ``start_tag``...``end_tag`` span.

    Matching is non-greedy and spans newlines (DOTALL). Returns ``None``
    when no complete tag pair occurs in ``text``.
    """
    tag_pattern = re.compile(
        re.escape(start_tag) + r"(.*?)" + re.escape(end_tag),
        flags=re.DOTALL,
    )
    last_hit = None
    for hit in tag_pattern.finditer(text):
        last_hit = hit  # keep only the final occurrence
    if last_hit is None:
        return None
    return last_hit.group(1).strip()
|
| 560 |
+
|
| 561 |
+
def replace_recent_steps(origin_str, replace_str):
    """Merge replacement reasoning steps into the original reasoning steps.

    Both inputs are texts where each step is introduced by a header line of
    the form ``Step <n>:``; following lines (until the next header) belong to
    that step. Steps parsed from ``replace_str`` overwrite the same-numbered
    step in ``origin_str``, are added when the number is new, or — when the
    replacement content contains ``"DELETE THIS STEP"`` — remove that step.

    Parameters:
    - origin_str (str): The original reasoning steps.
    - replace_str (str): The steps to replace, add, or delete.

    Returns:
    - str: Surviving step bodies sorted by step number, joined by blank lines
      (headers are not reproduced in the output).
    """

    # Matches a step header at the start of a line: "Step", spaces, a number, a colon.
    header_re = re.compile(r"Step\s+(\d+):\s*")

    def parse_steps(text):
        """Parse ``text`` into a dict mapping step number -> stripped body."""
        parsed = {}
        active_num = None
        buffered = []
        for raw_line in text.splitlines():
            m = header_re.match(raw_line)
            if m is None:
                # Continuation line: attach to the currently open step, if any.
                if active_num is not None:
                    buffered.append(raw_line)
                continue
            # New header found: flush the previously open step first.
            if active_num is not None:
                parsed[active_num] = "\n".join(buffered).strip()
            active_num = int(m.group(1))
            remainder = raw_line[m.end():].strip()
            buffered = [remainder] if remainder else []
        # Flush the final open step, if there was one.
        if active_num is not None:
            parsed[active_num] = "\n".join(buffered).strip()
        return parsed

    merged = parse_steps(origin_str)

    # Apply the replacements: delete, overwrite, or add each step.
    for num, body in parse_steps(replace_str).items():
        if "DELETE THIS STEP" in body:
            merged.pop(num, None)  # silently ignore deletes of unknown steps
        else:
            merged[num] = body

    # Rebuild the reasoning as step bodies in numeric order, blank-line separated.
    return "\n\n".join(body for _, body in sorted(merged.items()))
|
| 632 |
+
|
| 633 |
+
# ---------------------- Initialize Collection Structure ----------------------
|
| 634 |
+
# Initialize a list to collect batch outputs
|
| 635 |
+
batch_output_records = []
|
| 636 |
+
|
| 637 |
+
start_time = time.time()
|
| 638 |
+
turn = 0
|
| 639 |
+
|
| 640 |
+
# 流程
|
| 641 |
+
# 首先根据prompt让模型生成回复
|
| 642 |
+
# 从模型的回复中提取搜索查询
|
| 643 |
+
# 如果有(回复要以END_SEARCH_QUERY结尾)
|
| 644 |
+
# 根据搜索查询,从互联网上提取相关信息
|
| 645 |
+
# 处理查询的信息
|
| 646 |
+
# 让模型基于之前的步骤,检索query和查询到的信息生成新的推理,得到search result
|
| 647 |
+
# 然后回到第一步(这里模型就会根据前面的search result,再次生成新的回复
|
| 648 |
+
# 如果没有查询则该条问题结束
|
| 649 |
+
|
| 650 |
+
# Main loop until all sequences are finished or maximum turns reached
|
| 651 |
+
while True:
|
| 652 |
+
# Identify sequences that need generation
|
| 653 |
+
sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']] # 筛选出需要生成的新内容的序列,active_sequences 是一个包含所有活跃序列的列表
|
| 654 |
+
|
| 655 |
+
if sequences_needing_generation:
|
| 656 |
+
turn += 1
|
| 657 |
+
print(f'\n-------------- Turn {turn} --------------')
|
| 658 |
+
print(f"We have {len(sequences_needing_generation)} sequences needing generation...")
|
| 659 |
+
outputs = run_generation(sequences_needing_generation, max_tokens) # 根据prompt
|
| 660 |
+
print("Generation completed, processing outputs...")
|
| 661 |
+
|
| 662 |
+
# Initialize batch variables
|
| 663 |
+
batch_relevant_info = []
|
| 664 |
+
batch_original_questions = []
|
| 665 |
+
batch_prev_reasonings = []
|
| 666 |
+
batch_search_queries = []
|
| 667 |
+
batch_documents = []
|
| 668 |
+
batch_sequences = []
|
| 669 |
+
|
| 670 |
+
# Collect URLs to fetch across all sequences
|
| 671 |
+
all_urls_to_fetch = set() # 初始化一个集合 all_urls_to_fetch 用来收集所有需要获取的 URL
|
| 672 |
+
url_snippets = {}
|
| 673 |
+
url_sequence_map = {} # Map URL to list of sequences needing it
|
| 674 |
+
|
| 675 |
+
# Process each sequence and collect URLs
|
| 676 |
+
for seq, out in zip(sequences_needing_generation, outputs): # 遍历需要生成新内容的序列,并生成新内容,同时收集需要获取的 URL
|
| 677 |
+
text = out.outputs[0].text # 将生成的文本添加到序列的历史记录、提示和输出中
|
| 678 |
+
seq['history'].append(text)
|
| 679 |
+
# Append generated text to prompt and output
|
| 680 |
+
seq['prompt'] += text
|
| 681 |
+
seq['output'] += text
|
| 682 |
+
seq['all_info'].append({f"turn_{turn}_reason": text})
|
| 683 |
+
# Extract search query
|
| 684 |
+
search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY) # 提取搜索查询
|
| 685 |
+
|
| 686 |
+
# If a search query is present and needs to be executed
|
| 687 |
+
if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
|
| 688 |
+
if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
|
| 689 |
+
# Execute search, use cache if available
|
| 690 |
+
if search_query in search_cache:
|
| 691 |
+
results = search_cache[search_query] # 如果搜索查询结果在缓存中存在,则从缓存中获取结果
|
| 692 |
+
print(f"Using cached search results for query: \"{search_query}\"")
|
| 693 |
+
else:
|
| 694 |
+
try:
|
| 695 |
+
print(f"Execute and cache search for query: \"{search_query}\"")
|
| 696 |
+
results = bing_web_search(search_query, bing_subscription_key, bing_endpoint, market='en-US', language='en') # 执行搜索
|
| 697 |
+
search_cache[search_query] = results # 将搜索结果添加到缓存中
|
| 698 |
+
print(f"Executed and cached search for query: \"{search_query}\"")
|
| 699 |
+
except Exception as e:
|
| 700 |
+
print(f"Error during search query '{search_query}': {e}")
|
| 701 |
+
search_cache[search_query] = {}
|
| 702 |
+
results = {}
|
| 703 |
+
|
| 704 |
+
# Extract relevant information from Bing search results
|
| 705 |
+
relevant_info = extract_relevant_info(results)[:top_k] # 从搜索结果中提取出最相关的信息
|
| 706 |
+
seq['relevant_info'] = relevant_info
|
| 707 |
+
|
| 708 |
+
# Extract URLs and snippets
|
| 709 |
+
urls_to_fetch = [it['url'] for it in relevant_info] # 从搜索结果中提取出所有 URL
|
| 710 |
+
snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info} # 创建一个字典 snippets,将 URL 映射到它们对应的片段(snippet)。如果 snippet 字段存在,则把它加入字典
|
| 711 |
+
|
| 712 |
+
# Filter URLs that are not cached
|
| 713 |
+
urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache] # 筛选出所有没有被缓存的 UR
|
| 714 |
+
cached_urls = [u for u in urls_to_fetch if u in url_cache] # 选出已经缓存的 URL(即在 url_cache 中存在的 URL)。这些 URL 已经被处理过,不需要再次请求
|
| 715 |
+
|
| 716 |
+
# Store info for all_urls_to_fetch and url_snippets
|
| 717 |
+
for url in urls_to_fetch_filtered:
|
| 718 |
+
all_urls_to_fetch.add(url)
|
| 719 |
+
url_snippets[url] = snippets.get(url, "") # 将每个 URL 对应的片段存储到 url_snippets 字典中
|
| 720 |
+
|
| 721 |
+
all_reasoning_steps = seq['output']
|
| 722 |
+
all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n") # 将连续的空行(\n\n)替换为单个换行符(\n),然后按换行符拆分文本,得到每个推理步骤单独的一行
|
| 723 |
+
|
| 724 |
+
truncated_prev_reasoning = ""
|
| 725 |
+
for i, step in enumerate(all_reasoning_steps):
|
| 726 |
+
truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n" # 遍历 all_reasoning_steps 中的每个步骤,并将每个步骤编号和步骤内容格式化后,添加到 truncated_prev_reasoning 字符串中。这样生成一个包含步骤编号和内容的字符串
|
| 727 |
+
|
| 728 |
+
prev_steps = truncated_prev_reasoning.split('\n\n') # 将推理步骤字符串 truncated_prev_reasoning 按照每两个换行符拆分成多个步骤
|
| 729 |
+
if len(prev_steps) <= 5: # 如果步骤的数量不超过 5,直接保留所有步骤
|
| 730 |
+
truncated_prev_reasoning = '\n\n'.join(prev_steps)
|
| 731 |
+
else:
|
| 732 |
+
truncated_prev_reasoning = ''
|
| 733 |
+
for i, step in enumerate(prev_steps): # 如果步骤超过 5 个,进行截断。保留第一个步骤、最后四个步骤以及包含搜索查询或搜索结果的步骤。如果中间的步骤不重要,则用 ... 来表示省略
|
| 734 |
+
if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
|
| 735 |
+
truncated_prev_reasoning += step + '\n\n'
|
| 736 |
+
else: # 如果步骤超过 5 个,进行截断。保留第一个步骤、最后四个步骤以及包含搜索查询或搜索结果的步骤。如果中间的步骤不重要,则用 ... 来表示省略
|
| 737 |
+
if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
|
| 738 |
+
truncated_prev_reasoning += '...\n\n'
|
| 739 |
+
truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
|
| 740 |
+
|
| 741 |
+
# Collect parameters for batch processing
|
| 742 |
+
batch_relevant_info.append(relevant_info) # 搜索出来的信息
|
| 743 |
+
batch_original_questions.append(seq['item']['Question']) # 原始问题
|
| 744 |
+
batch_prev_reasonings.append(truncated_prev_reasoning) # 之前的推理步骤
|
| 745 |
+
batch_search_queries.append(search_query) # 搜索查询
|
| 746 |
+
batch_sequences.append(seq)
|
| 747 |
+
|
| 748 |
+
# Update search count and executed queries
|
| 749 |
+
seq['search_count'] += 1 # 更新搜索计数
|
| 750 |
+
seq['executed_search_queries'].add(search_query) # 将已执行的搜索查询添加到集合中
|
| 751 |
+
|
| 752 |
+
elif seq['search_count'] >= MAX_SEARCH_LIMIT: # 如果搜索次数达到或超过该限制,则返回一条消息,通知该查询无法再进行
|
| 753 |
+
limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
|
| 754 |
+
seq['prompt'] += limit_message
|
| 755 |
+
seq['output'] += limit_message
|
| 756 |
+
seq['history'].append(limit_message)
|
| 757 |
+
seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
|
| 758 |
+
print(f"Search limit reached for query: \"{search_query}\"")
|
| 759 |
+
|
| 760 |
+
elif search_query in seq['executed_search_queries']: # 如果当前查询已经执行过,则返回一个消息,提示用户查询已重复,并引导其查看之前的结果
|
| 761 |
+
limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
|
| 762 |
+
seq['prompt'] += limit_message
|
| 763 |
+
seq['output'] += limit_message
|
| 764 |
+
seq['history'].append(limit_message)
|
| 765 |
+
seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
|
| 766 |
+
print(f"Repeated search for query: \"{search_query}\"")
|
| 767 |
+
|
| 768 |
+
|
| 769 |
+
else: # 如果当前序列没有需要执行的搜索查询,则将该序列标记为完成,并打印提示信息
|
| 770 |
+
# If no search query needs to be executed, mark the sequence as finished
|
| 771 |
+
seq['finished'] = True
|
| 772 |
+
print("Sequence marked as complete.")
|
| 773 |
+
|
| 774 |
+
print(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
|
| 775 |
+
print(f"all_urls_to_fetch: {all_urls_to_fetch}")
|
| 776 |
+
# Batch fetch all URLs at once to optimize speed
|
| 777 |
+
|
| 778 |
+
if all_urls_to_fetch:
|
| 779 |
+
print(f"Fetching {len(all_urls_to_fetch)} URLs...")
|
| 780 |
+
try:
|
| 781 |
+
fetched_contents = fetch_page_content( # 一次性获取所有 URL 的中搜索出来的内容
|
| 782 |
+
list(all_urls_to_fetch),
|
| 783 |
+
use_jina=use_jina,
|
| 784 |
+
jina_api_key=jina_api_key,
|
| 785 |
+
# snippets=url_snippets # Do not pass snippets when updating url_cache directly
|
| 786 |
+
)
|
| 787 |
+
print(f"Fetched {len(fetched_contents)} URLs successfully.")
|
| 788 |
+
except Exception as e:
|
| 789 |
+
print(f"Error during batch URL fetching: {e}")
|
| 790 |
+
fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
|
| 791 |
+
# Update cache with fetched contents
|
| 792 |
+
for url, content in fetched_contents.items(): # 将获取的内容添加到 url_cache 中
|
| 793 |
+
url_cache[url] = content
|
| 794 |
+
|
| 795 |
+
# After fetching, prepare formatted documents for batch processing
|
| 796 |
+
for relevant_info in batch_relevant_info:
|
| 797 |
+
formatted_documents = "" # 初始化一个空字符串 formatted_documents,用于拼接本次要处理的所有网页信息。后面会将其添加到 batch_documents 列表中
|
| 798 |
+
for i, doc_info in enumerate(relevant_info):
|
| 799 |
+
url = doc_info['url']
|
| 800 |
+
raw_context = url_cache.get(url, "") # 获取 url 对应的内容
|
| 801 |
+
doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
|
| 802 |
+
success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
|
| 803 |
+
if success:
|
| 804 |
+
context = filtered_context
|
| 805 |
+
else: # 否则就取 raw_context 的前 max_doc_len * 2 个字符,作为一个有限的片段,避免过长导致后续处理负担
|
| 806 |
+
context = raw_context[:max_doc_len*2]
|
| 807 |
+
|
| 808 |
+
doc_info['context'] = context
|
| 809 |
+
formatted_documents += f"**Web Page {i + 1}:**\n"
|
| 810 |
+
formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
|
| 811 |
+
|
| 812 |
+
batch_documents.append(formatted_documents) # 将本组搜索结果的所有信息(拼接成的字符串 formatted_documents)添加到 batch_documents 列表中
|
| 813 |
+
|
| 814 |
+
# After fetching, prepare for batch processing if there are any
|
| 815 |
+
if batch_sequences:
|
| 816 |
+
print(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
|
| 817 |
+
webpage_analyses = generate_webpage_to_reasonchain_batch( # 根据前面处理好的结果,生成新的推理
|
| 818 |
+
original_questions=batch_original_questions,
|
| 819 |
+
prev_reasonings=batch_prev_reasonings,
|
| 820 |
+
search_queries=batch_search_queries,
|
| 821 |
+
documents=batch_documents,
|
| 822 |
+
dataset_name=dataset_name,
|
| 823 |
+
batch_output_records=batch_output_records, # Pass the collection list
|
| 824 |
+
max_tokens=max_tokens,
|
| 825 |
+
)
|
| 826 |
+
print("Batch generation completed, assigning outputs to sequences...")
|
| 827 |
+
|
| 828 |
+
for seq, analysis,doc in zip(batch_sequences, webpage_analyses, batch_documents): # 遍历批处理返回的 webpage_analyses,将处理结果与相应的序列 seq 进行一一对应
|
| 829 |
+
if isinstance(analysis, str): # 判断 analysis 是否是纯字符串。如果是字符串,说明直接可以追加到序列的文本中
|
| 830 |
+
append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n" # 封装处理结果,添加到序列的历史记录、提示和输出中
|
| 831 |
+
seq['prompt'] += append_text
|
| 832 |
+
seq['output'] += append_text
|
| 833 |
+
seq['history'].append(append_text) # 存的是每一次的webpage_analyses
|
| 834 |
+
seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
|
| 835 |
+
else: # 如果 analysis 不是纯字符串,那么可能是一种特殊的结构,比如表示需要替换推理步骤的 dict 或其他格式
|
| 836 |
+
append_text = replace_recent_steps(seq['output'], analysis)
|
| 837 |
+
seq['prompt'] += append_text
|
| 838 |
+
seq['output'] += append_text
|
| 839 |
+
seq['history'].append(append_text)
|
| 840 |
+
seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
|
| 841 |
+
|
| 842 |
+
# Check if all sequences are finished
|
| 843 |
+
# 保存active_sequences
|
| 844 |
+
active_sequences_part = [{ # 记录每个问题的搜索历史
|
| 845 |
+
'item': ele["item"],
|
| 846 |
+
'prompt': ele['prompt'],
|
| 847 |
+
'output': ele["output"],
|
| 848 |
+
'finished': ele["finished"], # 一开始均为未完成
|
| 849 |
+
'history':ele["history"],
|
| 850 |
+
'search_count': ele["search_count"],
|
| 851 |
+
'all_info': ele['all_info']
|
| 852 |
+
} for ele in active_sequences]
|
| 853 |
+
with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
|
| 854 |
+
json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
|
| 855 |
+
unfinished = [seq for seq in active_sequences if not seq['finished']] # 是否结束是基于模型是否生成了新的搜索
|
| 856 |
+
if not unfinished:
|
| 857 |
+
break
|
| 858 |
+
else:
|
| 859 |
+
if turn >= MAX_TURN:
|
| 860 |
+
print(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
|
| 861 |
+
break
|
| 862 |
+
|
| 863 |
+
total_time = time.time() - start_time
|
| 864 |
+
print(f"Total time taken: {total_time} seconds")
|
| 865 |
+
|
| 866 |
+
# ---------------------- Save Batch Output Records to JSON File ----------------------
|
| 867 |
+
# Define output JSON file path
|
| 868 |
+
t = time.localtime()
|
| 869 |
+
batch_output_file = os.path.join(output_dir, f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')
|
| 870 |
+
|
| 871 |
+
# Save batch_output_records to JSON file
|
| 872 |
+
with open(batch_output_file, 'w', encoding='utf-8') as f: # 这里存的是webpage推理时的输入和输出和提取后的信息
|
| 873 |
+
json.dump(batch_output_records, f, ensure_ascii=False, indent=2)
|
| 874 |
+
|
| 875 |
+
print(f"Batch outputs saved to {batch_output_file}")
|
| 876 |
+
|
| 877 |
+
# Prepare output list for evaluation
|
| 878 |
+
output_list = [seq['output'] for seq in active_sequences]
|
| 879 |
+
|
| 880 |
+
# Run evaluation
|
| 881 |
+
run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)
|
| 882 |
+
|
| 883 |
+
# 评测has answer信息
|
| 884 |
+
turn_files = os.listdir(output_dir)
|
| 885 |
+
turn_files = [file for file in turn_files if file.startswith("turn_")]
|
| 886 |
+
max_turn_file = max(turn_files, key=lambda x: int(re.search(r'turn_(\d+)', x).group(1)))
|
| 887 |
+
|
| 888 |
+
max_turn_file_path = os.path.join(output_dir, max_turn_file)
|
| 889 |
+
print(f"max_turn_file_path: {max_turn_file_path}")
|
| 890 |
+
add_eval(model_path, max_turn_file_path)
|
| 891 |
+
|
| 892 |
+
# ---------------------- Update Search and URL Cache ----------------------
|
| 893 |
+
print('Updating Search and URL Cache...')
|
| 894 |
+
# Load existing caches or initialize empty dictionaries
|
| 895 |
+
if os.path.exists(search_cache_path):
|
| 896 |
+
with open(search_cache_path, 'r', encoding='utf-8') as f:
|
| 897 |
+
search_cache_new = json.load(f)
|
| 898 |
+
else:
|
| 899 |
+
search_cache_new = {}
|
| 900 |
+
|
| 901 |
+
if os.path.exists(url_cache_path):
|
| 902 |
+
with open(url_cache_path, 'r', encoding='utf-8') as f:
|
| 903 |
+
url_cache_new = json.load(f)
|
| 904 |
+
else:
|
| 905 |
+
url_cache_new = {}
|
| 906 |
+
|
| 907 |
+
search_cache.update(search_cache_new)
|
| 908 |
+
url_cache.update(url_cache_new)
|
| 909 |
+
|
| 910 |
+
save_caches()
|
| 911 |
+
|
| 912 |
+
print("Process completed.")
|
| 913 |
+
|
| 914 |
+
if __name__ == "__main__":
|
| 915 |
+
main()
|
deep_search/search_o1/scripts/run_search_o1.py
ADDED
|
@@ -0,0 +1,752 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# run_search_o1.py
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
import re
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import string
|
| 10 |
+
from typing import Optional, Tuple, List, Dict
|
| 11 |
+
import argparse
|
| 12 |
+
|
| 13 |
+
from transformers import AutoTokenizer
|
| 14 |
+
from vllm import LLM, SamplingParams
|
| 15 |
+
|
| 16 |
+
from bing_search import (
|
| 17 |
+
bing_web_search,
|
| 18 |
+
extract_relevant_info,
|
| 19 |
+
fetch_page_content,
|
| 20 |
+
extract_snippet_with_context
|
| 21 |
+
)
|
| 22 |
+
from evaluate import (
|
| 23 |
+
run_evaluation,
|
| 24 |
+
extract_answer
|
| 25 |
+
)
|
| 26 |
+
from prompts import (
|
| 27 |
+
get_gpqa_search_o1_instruction,
|
| 28 |
+
get_math_search_o1_instruction,
|
| 29 |
+
get_code_search_o1_instruction,
|
| 30 |
+
get_singleqa_search_o1_instruction,
|
| 31 |
+
get_multiqa_search_o1_instruction,
|
| 32 |
+
get_webpage_to_reasonchain_instruction,
|
| 33 |
+
get_task_instruction_openqa,
|
| 34 |
+
get_task_instruction_math,
|
| 35 |
+
get_task_instruction_multi_choice,
|
| 36 |
+
get_task_instruction_code,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
# Define special tokens
|
| 40 |
+
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
|
| 41 |
+
END_SEARCH_QUERY = "<|end_search_query|>"
|
| 42 |
+
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
|
| 43 |
+
END_SEARCH_RESULT = "<|end_search_result|>"
|
| 44 |
+
|
| 45 |
+
def parse_args():
    """Parse command-line arguments for a Search-O1 run.

    Returns:
        argparse.Namespace carrying dataset/split selection, search and
        retrieval budgets, model path, sampling parameters, and Bing/Jina
        API configuration.
    """

    def str2bool(value):
        # Bug fix: argparse's `type=bool` calls bool() on the raw string, so
        # every non-empty value (including "False") used to parse as True.
        # Parse the common boolean spellings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', '1'):
            return True
        if lowered in ('no', 'false', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError(f"Boolean value expected, got {value!r}.")

    parser = argparse.ArgumentParser(description="Run Search O1 for various datasets and models.")

    # Dataset and split configuration
    parser.add_argument(
        '--dataset_name',
        type=str,
        required=True,
        choices=['gpqa', 'math500', 'aime', 'amc', 'livecode', 'nq', 'triviaqa', 'hotpotqa', '2wiki', 'musique', 'bamboogle'],
        help="Name of the dataset to use."
    )

    parser.add_argument(
        '--split',
        type=str,
        required=True,
        choices=['test', 'diamond', 'main', 'extended'],
        help="Dataset split to use."
    )

    parser.add_argument(
        '--subset_num',
        type=int,
        default=-1,
        help="Number of examples to process. Defaults to all if not specified."
    )

    # Search and document retrieval configuration
    parser.add_argument(
        '--max_search_limit',
        type=int,
        default=10,
        help="Maximum number of searches per question."
    )

    parser.add_argument(
        '--max_turn',
        type=int,
        default=15,
        help="Maximum number of turns."
    )

    # Maximum number of documents returned per search-engine query.
    parser.add_argument(
        '--top_k',
        type=int,
        default=10,
        help="Maximum number of search documents to return."
    )

    parser.add_argument(
        '--max_doc_len',
        type=int,
        default=3000,
        help="Maximum length of each searched document."
    )

    parser.add_argument(
        '--use_jina',
        type=str2bool,  # was `type=bool`, which treated "False" as True
        default=True,
        help="Whether to use Jina API for document fetching."
    )

    parser.add_argument(
        '--jina_api_key',
        type=str,
        default='None',
        help="Your Jina API Key to Fetch URL Content."
    )

    # Model configuration
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help="Path to the pre-trained model."
    )

    # Sampling parameters
    parser.add_argument(
        '--temperature',
        type=float,
        default=0.7,
        help="Sampling temperature."
    )

    parser.add_argument(
        '--top_p',
        type=float,
        default=0.8,
        help="Top-p sampling parameter."
    )

    parser.add_argument(
        '--top_k_sampling',
        type=int,
        default=20,
        help="Top-k sampling parameter."
    )

    parser.add_argument(
        '--repetition_penalty',
        type=float,
        default=None,
        help="Repetition penalty. If not set, defaults based on the model."
    )

    parser.add_argument(
        '--max_tokens',
        type=int,
        default=32768,
        help="Maximum number of tokens to generate. If not set, defaults based on the model and dataset."
    )

    # Bing API Configuration
    parser.add_argument(
        '--bing_subscription_key',
        type=str,
        required=True,
        help="Bing Search API subscription key."
    )

    parser.add_argument(
        '--bing_endpoint',
        type=str,
        default="https://api.bing.microsoft.com/v7.0/search",
        help="Bing Search API endpoint."
    )

    return parser.parse_args()
|
| 176 |
+
def main():
    """Entry point: parse args, load caches/model/data, then run the
    iterative search-augmented reasoning loop (defined later in this
    function) and save evaluation outputs."""
    args = parse_args()

    # Extract arguments into locals for brevity.
    dataset_name = args.dataset_name
    split = args.split
    subset_num = args.subset_num
    MAX_SEARCH_LIMIT = args.max_search_limit
    MAX_TURN = args.max_turn
    top_k = args.top_k
    max_doc_len = args.max_doc_len
    model_path = args.model_path
    temperature = args.temperature
    top_p = args.top_p
    top_k_sampling = args.top_k_sampling
    repetition_penalty = args.repetition_penalty
    max_tokens = args.max_tokens
    bing_subscription_key = args.bing_subscription_key
    bing_endpoint = args.bing_endpoint
    use_jina = args.use_jina
    jina_api_key = args.jina_api_key

    # Adjust parameters based on dataset: QA datasets get fixed
    # search/turn/document budgets regardless of the CLI values.
    if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki', 'medmcqa', 'pubhealth']:
        MAX_SEARCH_LIMIT = 5
        if dataset_name in ['hotpotqa', 'musique', 'bamboogle', '2wiki']:
            # Multi-hop QA gets a larger search budget.
            MAX_SEARCH_LIMIT = 10
            MAX_TURN = 15
        top_k = 10
        max_doc_len = 3000

    # The CLI default is the literal string 'None'; normalize to Python None.
    if args.jina_api_key == 'None':
        jina_api_key = None

    # Set default repetition_penalty if not provided.
    if repetition_penalty is None:
        repetition_penalty = 1.05 if 'qwq' in model_path.lower() else 1.0

    # Data paths based on dataset.
    if dataset_name == 'livecode':
        data_path = f'./data/LiveCodeBench/{split}.json'
    elif dataset_name in ['math500', 'gpqa', 'aime', 'amc']:
        data_path = f'./data/{dataset_name.upper()}/{split}.json'
    else:
        data_path = f'./data/QA_Datasets/{dataset_name}.json'

    print('-----------------------')
    print(f'Using {dataset_name} {split} set.')
    print('-----------------------')

    # ---------------------- Caching Mechanism ----------------------
    # Define cache directories and file paths.
    cache_dir = './cache'
    search_cache_path = os.path.join(cache_dir, 'search_cache.json')
    url_cache_path = os.path.join(cache_dir, 'url_cache.json')

    # Ensure cache directory exists.
    os.makedirs(cache_dir, exist_ok=True)

    # Load existing caches or initialize empty dictionaries.
    if os.path.exists(search_cache_path):
        with open(search_cache_path, 'r', encoding='utf-8') as f:
            search_cache = json.load(f)
    else:
        search_cache = {}

    if os.path.exists(url_cache_path):
        with open(url_cache_path, 'r', encoding='utf-8') as f:
            url_cache = json.load(f)
    else:
        url_cache = {}

    # Persist both in-memory caches back to disk.
    def save_caches():
        with open(search_cache_path, 'w', encoding='utf-8') as f:
            json.dump(search_cache, f, ensure_ascii=False, indent=2)
        with open(url_cache_path, 'w', encoding='utf-8') as f:
            json.dump(url_cache, f, ensure_ascii=False, indent=2)

    # ---------------------- Model Loading ----------------------
    print(f"Loading tokenizer from {model_path}...")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = 'left'  # pad on the left (was: 主要是左填充)
    print("Tokenizer loaded successfully.")

    # Define output directory based on model and dataset.
    if 'qwq' in model_path.lower():
        if dataset_name in ['math500', 'gpqa', 'aime', 'amc', 'livecode']:
            output_dir = f'./outputs/{dataset_name}.qwq.search_o1'
            # gpqa runs with these budget settings are routed to a
            # separate analysis folder.
            if dataset_name == 'gpqa' and (MAX_SEARCH_LIMIT != 5 or top_k != 10):
                output_dir = f'./outputs/runs.analysis/{dataset_name}.qwq.search_o1.{MAX_SEARCH_LIMIT}.{top_k}'
        else:
            output_dir = f'./outputs/runs.qa/{dataset_name}.qwq.search_o1'
    else:
        model_short_name = model_path.split('/')[-1].lower().replace('-instruct', '')
        output_dir = f'./outputs/runs.baselines/{dataset_name}.{model_short_name}.search_o1'
    os.makedirs(output_dir, exist_ok=True)

    print(f"Loading model from {model_path}...")
    print(f"device_count: {torch.cuda.device_count()}")
    # Initialize the vLLM engine across all visible GPUs.
    llm = LLM(
        model=model_path,
        tensor_parallel_size=torch.cuda.device_count(),
        gpu_memory_utilization=0.95,
    )
    print("Model loaded successfully.")

    # ---------------------- Data Loading ----------------------
    print(f"Loading data from {data_path}...")
    with open(data_path, 'r', encoding='utf-8') as json_file:
        filtered_data = json.load(json_file)
    print(f"Data loaded successfully. Total examples: {len(filtered_data)}")
| 292 |
+
# ---------------------- Batch Generation Function ----------------------
|
| 293 |
+
def generate_webpage_to_reasonchain_batch(
    original_questions: List[str],
    prev_reasonings: List[str],
    search_queries: List[str],
    documents: List[str],
    dataset_name: str,
    batch_output_records: List[Dict],
    max_tokens: int = 32768,
    coherent: bool = False,
) -> List[str]:
    """Distill fetched web documents into reasoning-chain snippets.

    For each (previous reasoning, search query, documents) triple, the LLM
    is prompted to analyze the documents and its raw reply is reduced via
    `extract_answer(..., mode='infogen')`.  Every generation is also logged
    into `batch_output_records` as a dict with keys 'prompt', 'raw_output'
    and 'extracted_info'.

    Note: `original_questions`, `dataset_name` and `coherent` are accepted
    for interface compatibility but are not read here.
    """
    # Build one chat-formatted prompt per (reasoning, query, documents) triple.
    chat_prompts = []
    for reasoning, query, doc_text in zip(prev_reasonings, search_queries, documents):
        instruction = get_webpage_to_reasonchain_instruction(reasoning, query, doc_text)
        message = {"role": "user", "content": instruction}
        chat_prompts.append(
            tokenizer.apply_chat_template([message], tokenize=False, add_generation_prompt=True)
        )

    # One batched vLLM call for the whole group.
    generation = llm.generate(
        chat_prompts,
        sampling_params=SamplingParams(
            max_tokens=max_tokens,
            temperature=0.7,
            top_p=0.8,
            top_k=20,
            repetition_penalty=1.05,
        )
    )

    raw_texts = [result.outputs[0].text for result in generation]
    distilled = [extract_answer(text, mode='infogen') for text in raw_texts]

    # Record every prompt/raw-output/extraction triple for later inspection.
    for rendered_prompt, raw_text, info in zip(chat_prompts, raw_texts, distilled):
        batch_output_records.append({
            'prompt': rendered_prompt,
            'raw_output': raw_text,
            'extracted_info': info
        })

    return distilled
|
| 334 |
+
# ---------------------- Preparation of Input Prompts ----------------------
# Build one chat-formatted prompt per example: `instruction` explains the
# search protocol, `user_prompt` carries the concrete question.
input_list = []
for item in filtered_data:
    question = item['Question']

    if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
        # Single-hop vs multi-hop QA use different search instructions.
        if dataset_name in ['nq', 'triviaqa']:
            instruction = get_singleqa_search_o1_instruction(MAX_SEARCH_LIMIT)
        elif dataset_name in ['hotpotqa', 'musique', 'bamboogle', '2wiki']:
            instruction = get_multiqa_search_o1_instruction(MAX_SEARCH_LIMIT)
        if 'qwq' in model_path.lower():
            user_prompt = get_task_instruction_openqa(question, model_name='qwq')
        else:
            user_prompt = get_task_instruction_openqa(question)

    elif dataset_name in ['math500', 'aime', 'amc']:
        instruction = get_math_search_o1_instruction(MAX_SEARCH_LIMIT)
        if 'qwq' in model_path.lower():
            user_prompt = get_task_instruction_math(question, model_name='qwq')
        else:
            user_prompt = get_task_instruction_math(question)

    elif dataset_name == 'gpqa':
        instruction = get_gpqa_search_o1_instruction(MAX_SEARCH_LIMIT)
        if 'qwq' in model_path.lower():
            user_prompt = get_task_instruction_multi_choice(question, model_name='qwq')
        elif 'llama' in model_path.lower():
            user_prompt = get_task_instruction_multi_choice(question, model_name='llama')
        else:
            user_prompt = get_task_instruction_multi_choice(question)

    elif dataset_name == 'livecode':
        instruction = get_code_search_o1_instruction(MAX_SEARCH_LIMIT)
        question_title = item.get('question_title', '')
        if 'qwq' in model_path.lower():
            user_prompt = get_task_instruction_code(question, question_title=question_title, model_name='qwq')
        else:
            user_prompt = get_task_instruction_code(question)
    else:
        # NOTE(review): `instruction` is never assigned on this fallback
        # path, so the concatenation below would raise NameError.  In
        # practice it is unreachable because argparse restricts the
        # --dataset_name choices.
        user_prompt = ""  # Default to empty if dataset not matched

    prompt = [{"role": "user", "content": instruction + user_prompt}]
    prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
    input_list.append(prompt)

# Optionally restrict the run to the first `subset_num` examples.
if subset_num != -1:
    input_list = input_list[:subset_num]
    filtered_data = filtered_data[:subset_num]

# Initialize active sequences: one mutable state record per example,
# tracking its growing prompt/output and its search history.
active_sequences = [{
    'item': item,                       # original dataset record
    'prompt': prompt,                   # running prompt (grows every turn)
    'output': '',                       # accumulated model output
    'finished': False,                  # all sequences start unfinished
    'history': [],                      # texts appended turn by turn
    'search_count': 0,                  # number of searches executed so far
    'executed_search_queries': set(),   # de-duplication of executed queries
} for item, prompt in zip(filtered_data, input_list)]
|
| 394 |
+
# ---------------------- Set Max Tokens ----------------------
# NOTE(review): this unconditionally overwrites the --max_tokens CLI value
# with a per-model/per-dataset budget.
if 'qwq' in model_path.lower():
    if dataset_name in ['aime', 'amc', 'livecode']:
        max_tokens = 32768
    else:
        max_tokens = 20480
else:
    max_tokens = 8192

# ---------------------- Generation Function ----------------------
def run_generation(sequences: List[Dict], max_tokens: int) -> List:
    """Run one batched vLLM generation pass over every sequence's prompt.

    Generation stops at EOS or right after an emitted END_SEARCH_QUERY
    tag; the stop string is kept in the output so the caller can detect
    that a search was requested.
    """
    prompts = [s['prompt'] for s in sequences]  # current prompt per sequence
    sampling_params = SamplingParams(
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k_sampling,
        repetition_penalty=repetition_penalty,
        stop=[END_SEARCH_QUERY, tokenizer.eos_token],
        include_stop_str_in_output=True,
    )
    output_list = llm.generate(prompts, sampling_params=sampling_params)
    return output_list
+
|
| 418 |
+
# Helper: pull out the text located between start_tag and end_tag.
def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
    """Return the stripped contents of the LAST start_tag...end_tag span
    in `text`, or None when no complete span exists."""
    span_re = re.compile(re.escape(start_tag) + r"(.*?)" + re.escape(end_tag), re.DOTALL)
    found = span_re.findall(text)
    return found[-1].strip() if found else None
|
| 426 |
+
def replace_recent_steps(origin_str, replace_str):
    """Merge replacement reasoning steps into the original reasoning steps.

    Both inputs are texts made of "Step N: ..." sections.  A step found in
    `replace_str` overwrites (or adds) the step with the same number in
    `origin_str`; a replacement step whose content contains
    "DELETE THIS STEP" removes that step instead.

    Returns:
        str: the surviving step contents, ordered by step number and joined
        with blank lines (the "Step N:" prefixes themselves are dropped).
    """

    def parse_steps(text):
        """Map step number -> step content for every "Step N:" section.

        Lines before the first header are discarded; lines after a header
        (until the next one) belong to that step's content.
        """
        header = re.compile(r"Step\s+(\d+):\s*")
        parsed = {}
        active_num = None
        buffered = []
        for raw_line in text.splitlines():
            m = header.match(raw_line)
            if m:
                # Close out the previous step before starting a new one.
                if active_num is not None:
                    parsed[active_num] = "\n".join(buffered).strip()
                active_num = int(m.group(1))
                remainder = raw_line[m.end():].strip()
                buffered = [remainder] if remainder else []
            elif active_num is not None:
                buffered.append(raw_line)
        # Flush the final step, if any.
        if active_num is not None:
            parsed[active_num] = "\n".join(buffered).strip()
        return parsed

    merged = parse_steps(origin_str)
    for num, body in parse_steps(replace_str).items():
        if "DELETE THIS STEP" in body:
            merged.pop(num, None)   # delete marker: drop the step if present
        else:
            merged[num] = body      # otherwise replace or add it

    # Re-join surviving contents in step-number order.
    return "\n\n".join(body for _, body in sorted(merged.items()))
+
|
| 498 |
+
# ---------------------- Initialize Collection Structure ----------------------
# Collects every webpage-analysis prompt/output for later JSON dumping.
batch_output_records = []

start_time = time.time()
turn = 0

# Overall flow (translated from the original Chinese comments):
#   1. Generate a continuation from each sequence's current prompt.
#   2. Extract a search query from the reply; it only counts when the
#      reply ends with END_SEARCH_QUERY.
#   3. If there is a query: run the web search, fetch and trim the pages,
#      then have the model distill them (given the previous reasoning and
#      the query) into a search result that is appended to the prompt —
#      and loop back to step 1 so the model continues from that result.
#   4. If there is no query, that sequence is finished.

# Main loop until all sequences are finished or maximum turns reached.
while True:
    # Sequences that still need generation this turn.
    sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']]

    if sequences_needing_generation:
        turn += 1
        print(f'\n-------------- Turn {turn} --------------')
        print(f"We have {len(sequences_needing_generation)} sequences needing generation...")
        outputs = run_generation(sequences_needing_generation, max_tokens)
        print("Generation completed, processing outputs...")

        # Per-turn batch accumulators (parallel lists, one entry per
        # sequence that issued a fresh search this turn).
        batch_relevant_info = []
        batch_original_questions = []
        batch_prev_reasonings = []
        batch_search_queries = []
        batch_documents = []
        batch_sequences = []

        # Collect URLs to fetch across all sequences.
        all_urls_to_fetch = set()
        url_snippets = {}
        # NOTE(review): url_sequence_map is initialized but never used below.
        url_sequence_map = {}  # Map URL to list of sequences needing it

        # Process each sequence's new text and collect its URLs.
        for seq, out in zip(sequences_needing_generation, outputs):
            text = out.outputs[0].text
            seq['history'].append(text)
            # Append generated text to prompt and output.
            seq['prompt'] += text
            seq['output'] += text

            # Extract the (last) search query emitted in this turn's text.
            search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)

            # A query only triggers a search when generation stopped right
            # at the END_SEARCH_QUERY tag.
            if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
                if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
                    # Execute search, using the cache when available.
                    if search_query in search_cache:
                        results = search_cache[search_query]
                        print(f"Using cached search results for query: \"{search_query}\"")
                    else:
                        try:
                            results = bing_web_search(search_query, bing_subscription_key, bing_endpoint, market='en-US', language='en')
                            search_cache[search_query] = results
                            print(f"Executed and cached search for query: \"{search_query}\"")
                        except Exception as e:
                            # Best-effort: cache the failure as empty results.
                            print(f"Error during search query '{search_query}': {e}")
                            search_cache[search_query] = {}
                            results = {}

                    # Keep only the top_k most relevant hits.
                    relevant_info = extract_relevant_info(results)[:top_k]
                    seq['relevant_info'] = relevant_info

                    # Extract URLs and their snippets from the hits.
                    urls_to_fetch = [it['url'] for it in relevant_info]
                    snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info}

                    # Only URLs missing from the cache need a fetch.
                    urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache]
                    # NOTE(review): cached_urls is computed but never used.
                    cached_urls = [u for u in urls_to_fetch if u in url_cache]

                    for url in urls_to_fetch_filtered:
                        all_urls_to_fetch.add(url)
                        url_snippets[url] = snippets.get(url, "")

                    # Build a "Step N: ..." rendering of the reasoning so
                    # far (collapsing blank lines first).
                    all_reasoning_steps = seq['output']
                    all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n")

                    truncated_prev_reasoning = ""
                    for i, step in enumerate(all_reasoning_steps):
                        truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"

                    # Truncate long chains: keep the first step, the last
                    # four, and any step containing a search tag; elide the
                    # rest with a single "..." marker.
                    prev_steps = truncated_prev_reasoning.split('\n\n')
                    if len(prev_steps) <= 5:
                        truncated_prev_reasoning = '\n\n'.join(prev_steps)
                    else:
                        truncated_prev_reasoning = ''
                        for i, step in enumerate(prev_steps):
                            if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
                                truncated_prev_reasoning += step + '\n\n'
                            else:
                                # Collapse consecutive elided steps into one "...".
                                if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
                                    truncated_prev_reasoning += '...\n\n'
                    truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')

                    # Collect parameters for batch processing.
                    batch_relevant_info.append(relevant_info)
                    batch_original_questions.append(seq['item']['Question'])
                    batch_prev_reasonings.append(truncated_prev_reasoning)
                    batch_search_queries.append(search_query)
                    batch_sequences.append(seq)

                    # Update search count and executed queries.
                    seq['search_count'] += 1
                    seq['executed_search_queries'].add(search_query)

                elif seq['search_count'] >= MAX_SEARCH_LIMIT:
                    # Budget exhausted: tell the model searching is no longer allowed.
                    limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
                    seq['prompt'] += limit_message
                    seq['output'] += limit_message
                    seq['history'].append(limit_message)
                    print(f"Search limit reached for query: \"{search_query}\"")

                elif search_query in seq['executed_search_queries']:
                    # Duplicate query: point the model at its earlier results.
                    limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
                    seq['prompt'] += limit_message
                    seq['output'] += limit_message
                    seq['history'].append(limit_message)
                    print(f"Repeated search for query: \"{search_query}\"")

            else:
                # No pending search query: the sequence is done.
                seq['finished'] = True
                print("Sequence marked as complete.")

        print(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
        print(f"all_urls_to_fetch: {all_urls_to_fetch}")
        # Batch fetch all URLs at once to optimize speed.
        if all_urls_to_fetch:
            print(f"Fetching {len(all_urls_to_fetch)} URLs...")
            try:
                fetched_contents = fetch_page_content(
                    list(all_urls_to_fetch),
                    use_jina=use_jina,
                    jina_api_key=jina_api_key,
                    # snippets=url_snippets  # Do not pass snippets when updating url_cache directly
                )
                print(f"Fetched {len(fetched_contents)} URLs successfully.")
            except Exception as e:
                print(f"Error during batch URL fetching: {e}")
                fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
            # Update cache with fetched contents.
            for url, content in fetched_contents.items():
                url_cache[url] = content

        # After fetching, format the documents for each searching sequence:
        # JSON-dump every hit (with its snippet-centered context) under a
        # "**Web Page i:**" heading.
        for relevant_info in batch_relevant_info:
            formatted_documents = ""
            for i, doc_info in enumerate(relevant_info):
                url = doc_info['url']
                raw_context = url_cache.get(url, "")
                # Strip Bing's <b> highlighting from the snippet.
                doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
                success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
                if success:
                    context = filtered_context
                else:
                    # Fall back to a bounded prefix of the raw page.
                    context = raw_context[:max_doc_len*2]

                doc_info['context'] = context
                formatted_documents += f"**Web Page {i + 1}:**\n"
                formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"

            batch_documents.append(formatted_documents)

        # Distill the fetched documents into search results and append them.
        if batch_sequences:
            print(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
            webpage_analyses = generate_webpage_to_reasonchain_batch(
                original_questions=batch_original_questions,
                prev_reasonings=batch_prev_reasonings,
                search_queries=batch_search_queries,
                documents=batch_documents,
                dataset_name=dataset_name,
                batch_output_records=batch_output_records,  # Pass the collection list
                max_tokens=max_tokens,
            )
            print("Batch generation completed, assigning outputs to sequences...")

            for seq, analysis in zip(batch_sequences, webpage_analyses):
                if isinstance(analysis, str):
                    # Plain text analysis: wrap it in search-result tags.
                    append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n"
                    seq['prompt'] += append_text
                    seq['output'] += append_text
                    seq['history'].append(append_text)
                else:
                    # Non-string analysis: treated as step replacements to
                    # merge into the existing reasoning.
                    append_text = replace_recent_steps(seq['output'], analysis)
                    seq['prompt'] += append_text
                    seq['output'] += append_text
                    seq['history'].append(append_text)

    # Check if all sequences are finished (a sequence finishes when it
    # stops emitting search queries).
    unfinished = [seq for seq in active_sequences if not seq['finished']]
    if not unfinished:
        break
    else:
        if turn >= MAX_TURN:
            print(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
            break
|
| 709 |
+
total_time = time.time() - start_time
print(f"Total time taken: {total_time} seconds")

# ---------------------- Save Batch Output Records to JSON File ----------------------
# Timestamped output path for the webpage-analysis records.
# NOTE(review): the filename embeds ':' and ',' characters, which are not
# portable (':' is invalid in Windows filenames) — verify the target platform.
t = time.localtime()
batch_output_file = os.path.join(output_dir, f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')

# Save batch_output_records to JSON file.
with open(batch_output_file, 'w', encoding='utf-8') as f:
    json.dump(batch_output_records, f, ensure_ascii=False, indent=2)

print(f"Batch outputs saved to {batch_output_file}")

# Prepare output list for evaluation (final accumulated text per example).
output_list = [seq['output'] for seq in active_sequences]

# Run evaluation.
run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)

# ---------------------- Update Search and URL Cache ----------------------
# Re-read the on-disk caches and merge them into the in-memory ones before
# saving — presumably to avoid clobbering entries written by a concurrent
# run (on key conflicts the freshly re-read on-disk value wins); verify.
print('Updating Search and URL Cache...')
if os.path.exists(search_cache_path):
    with open(search_cache_path, 'r', encoding='utf-8') as f:
        search_cache_new = json.load(f)
else:
    search_cache_new = {}

if os.path.exists(url_cache_path):
    with open(url_cache_path, 'r', encoding='utf-8') as f:
        url_cache_new = json.load(f)
else:
    url_cache_new = {}

search_cache.update(search_cache_new)
url_cache.update(url_cache_new)

save_caches()

print("Process completed.")

if __name__ == "__main__":
    main()
deep_search/search_o1/scripts/run_search_o1_2.py
ADDED
|
@@ -0,0 +1,776 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# run_search_o1.py
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
import re
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import string
|
| 10 |
+
from typing import Optional, Tuple, List, Dict
|
| 11 |
+
import argparse
|
| 12 |
+
|
| 13 |
+
from transformers import AutoTokenizer
|
| 14 |
+
from vllm import LLM, SamplingParams
|
| 15 |
+
|
| 16 |
+
from bing_search import (
|
| 17 |
+
bing_web_search,
|
| 18 |
+
extract_relevant_info,
|
| 19 |
+
fetch_page_content,
|
| 20 |
+
extract_snippet_with_context
|
| 21 |
+
)
|
| 22 |
+
from evaluate import (
|
| 23 |
+
run_evaluation,
|
| 24 |
+
extract_answer
|
| 25 |
+
)
|
| 26 |
+
from prompts import (
|
| 27 |
+
get_gpqa_search_o1_instruction,
|
| 28 |
+
get_math_search_o1_instruction,
|
| 29 |
+
get_code_search_o1_instruction,
|
| 30 |
+
get_singleqa_search_o1_instruction,
|
| 31 |
+
get_multiqa_search_o1_instruction,
|
| 32 |
+
get_webpage_to_reasonchain_instruction,
|
| 33 |
+
get_task_instruction_openqa,
|
| 34 |
+
get_task_instruction_math,
|
| 35 |
+
get_task_instruction_multi_choice,
|
| 36 |
+
get_task_instruction_code,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
# Define special tokens
|
| 40 |
+
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
|
| 41 |
+
END_SEARCH_QUERY = "<|end_search_query|>"
|
| 42 |
+
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
|
| 43 |
+
END_SEARCH_RESULT = "<|end_search_result|>"
|
| 44 |
+
|
| 45 |
+
def parse_args(argv=None):
    """Parse command-line options for a Search-O1 run.

    Parameters:
    - argv (list[str] | None): Argument strings to parse; defaults to
      ``sys.argv[1:]`` when None, so existing CLI callers are unaffected.

    Returns:
    - argparse.Namespace: Parsed configuration options.
    """

    def str2bool(value):
        # BUG FIX: the original used `type=bool`, but argparse applies bool()
        # to the raw string, so `--use_jina False` evaluated to True (any
        # non-empty string is truthy). Parse common boolean spellings instead.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n'):
            return False
        raise argparse.ArgumentTypeError(f"Boolean value expected, got {value!r}.")

    parser = argparse.ArgumentParser(description="Run Search O1 for various datasets and models.")

    # Dataset and split configuration
    parser.add_argument(
        '--dataset_name',
        type=str,
        required=True,
        choices=['gpqa', 'math500', 'aime', 'amc', 'livecode', 'nq', 'triviaqa', 'hotpotqa', '2wiki', 'musique', 'bamboogle'],
        help="Name of the dataset to use."
    )

    parser.add_argument(
        '--split',
        type=str,
        required=True,
        choices=['test', 'diamond', 'main', 'extended'],
        help="Dataset split to use."
    )

    parser.add_argument(
        '--subset_num',
        type=int,
        default=-1,
        help="Number of examples to process. Defaults to all if not specified."
    )

    # Search and document retrieval configuration
    parser.add_argument(
        '--max_search_limit',
        type=int,
        default=10,
        help="Maximum number of searches per question."
    )

    parser.add_argument(
        '--max_turn',
        type=int,
        default=15,
        help="Maximum number of turns."
    )

    # Maximum number of documents returned by the search engine per query.
    parser.add_argument(
        '--top_k',
        type=int,
        default=10,
        help="Maximum number of search documents to return."
    )

    parser.add_argument(
        '--max_doc_len',
        type=int,
        default=3000,
        help="Maximum length of each searched document."
    )

    parser.add_argument(
        '--use_jina',
        type=str2bool,  # was `type=bool`, which made "--use_jina False" truthy
        default=False,
        help="Whether to use Jina API for document fetching."
    )

    parser.add_argument(
        '--jina_api_key',
        type=str,
        default='None',
        help="Your Jina API Key to Fetch URL Content."
    )

    # Model configuration
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help="Path to the pre-trained model."
    )

    # Sampling parameters
    parser.add_argument(
        '--temperature',
        type=float,
        default=0.7,
        help="Sampling temperature."
    )

    parser.add_argument(
        '--top_p',
        type=float,
        default=0.8,
        help="Top-p sampling parameter."
    )

    parser.add_argument(
        '--top_k_sampling',
        type=int,
        default=20,
        help="Top-k sampling parameter."
    )

    parser.add_argument(
        '--repetition_penalty',
        type=float,
        default=None,
        help="Repetition penalty. If not set, defaults based on the model."
    )

    parser.add_argument(
        '--max_tokens',
        type=int,
        default=32768,
        help="Maximum number of tokens to generate. If not set, defaults based on the model and dataset."
    )

    # Bing API Configuration
    parser.add_argument(
        '--bing_subscription_key',
        type=str,
        required=True,
        help="Bing Search API subscription key."
    )

    parser.add_argument(
        '--bing_endpoint',
        type=str,
        default="https://api.bing.microsoft.com/v7.0/search",
        help="Bing Search API endpoint."
    )

    return parser.parse_args(argv)
|
| 177 |
+
|
| 178 |
+
def main():
|
| 179 |
+
args = parse_args()
|
| 180 |
+
print(f"args.use_jina: {args.use_jina}")
|
| 181 |
+
# Extract arguments
|
| 182 |
+
dataset_name = args.dataset_name
|
| 183 |
+
split = args.split
|
| 184 |
+
subset_num = args.subset_num
|
| 185 |
+
MAX_SEARCH_LIMIT = args.max_search_limit
|
| 186 |
+
MAX_TURN = args.max_turn
|
| 187 |
+
top_k = args.top_k
|
| 188 |
+
max_doc_len = args.max_doc_len
|
| 189 |
+
model_path = args.model_path
|
| 190 |
+
temperature = args.temperature
|
| 191 |
+
top_p = args.top_p
|
| 192 |
+
top_k_sampling = args.top_k_sampling
|
| 193 |
+
repetition_penalty = args.repetition_penalty
|
| 194 |
+
max_tokens = args.max_tokens
|
| 195 |
+
bing_subscription_key = args.bing_subscription_key
|
| 196 |
+
bing_endpoint = args.bing_endpoint
|
| 197 |
+
use_jina = args.use_jina
|
| 198 |
+
jina_api_key = args.jina_api_key
|
| 199 |
+
use_jina = False
|
| 200 |
+
print(f"use_jina: {use_jina}")
|
| 201 |
+
# Adjust parameters based on dataset
|
| 202 |
+
if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki', 'medmcqa', 'pubhealth']:
|
| 203 |
+
MAX_SEARCH_LIMIT = 5
|
| 204 |
+
if dataset_name in ['hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 205 |
+
MAX_SEARCH_LIMIT = 10
|
| 206 |
+
MAX_TURN = 15
|
| 207 |
+
top_k = 10
|
| 208 |
+
max_doc_len = 3000
|
| 209 |
+
|
| 210 |
+
if args.jina_api_key == 'None':
|
| 211 |
+
jina_api_key = None
|
| 212 |
+
|
| 213 |
+
# Set default repetition_penalty if not provided
|
| 214 |
+
if repetition_penalty is None:
|
| 215 |
+
repetition_penalty = 1.05 if 'qwq' in model_path.lower() else 1.0
|
| 216 |
+
|
| 217 |
+
# Data paths based on dataset
|
| 218 |
+
if dataset_name == 'livecode':
|
| 219 |
+
data_path = f'./data/LiveCodeBench/{split}.json'
|
| 220 |
+
elif dataset_name in ['math500', 'gpqa', 'aime', 'amc']:
|
| 221 |
+
data_path = f'./data/{dataset_name.upper()}/{split}.json'
|
| 222 |
+
else:
|
| 223 |
+
data_path = f'./data/QA_Datasets/{dataset_name}.json'
|
| 224 |
+
|
| 225 |
+
print('-----------------------')
|
| 226 |
+
print(f'Using {dataset_name} {split} set.')
|
| 227 |
+
print('-----------------------')
|
| 228 |
+
|
| 229 |
+
# ---------------------- Caching Mechanism ----------------------
|
| 230 |
+
# Define cache directories and file paths
|
| 231 |
+
# cache_dir = './cache'
|
| 232 |
+
model_name = model_path.split('/')[-1].replace('-instruct', '')
|
| 233 |
+
cache_dir = f'./cache_{dataset_name}_{model_name}'
|
| 234 |
+
search_cache_path = os.path.join(cache_dir, 'search_cache.json')
|
| 235 |
+
url_cache_path = os.path.join(cache_dir, 'url_cache.json')
|
| 236 |
+
|
| 237 |
+
# Ensure cache directory exists
|
| 238 |
+
os.makedirs(cache_dir, exist_ok=True)
|
| 239 |
+
|
| 240 |
+
# Load existing caches or initialize empty dictionaries
|
| 241 |
+
if os.path.exists(search_cache_path):
|
| 242 |
+
with open(search_cache_path, 'r', encoding='utf-8') as f:
|
| 243 |
+
search_cache = json.load(f)
|
| 244 |
+
else:
|
| 245 |
+
search_cache = {}
|
| 246 |
+
|
| 247 |
+
if os.path.exists(url_cache_path):
|
| 248 |
+
with open(url_cache_path, 'r', encoding='utf-8') as f:
|
| 249 |
+
url_cache = json.load(f)
|
| 250 |
+
else:
|
| 251 |
+
url_cache = {}
|
| 252 |
+
|
| 253 |
+
# Function to save caches
|
| 254 |
+
def save_caches():
|
| 255 |
+
with open(search_cache_path, 'w', encoding='utf-8') as f:
|
| 256 |
+
json.dump(search_cache, f, ensure_ascii=False, indent=2)
|
| 257 |
+
with open(url_cache_path, 'w', encoding='utf-8') as f:
|
| 258 |
+
json.dump(url_cache, f, ensure_ascii=False, indent=2)
|
| 259 |
+
|
| 260 |
+
# ---------------------- Model Loading ----------------------
|
| 261 |
+
print(f"Loading tokenizer from {model_path}...")
|
| 262 |
+
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
| 263 |
+
if tokenizer.pad_token is None:
|
| 264 |
+
tokenizer.pad_token = tokenizer.eos_token
|
| 265 |
+
tokenizer.padding_side = 'left' # 主要是左填充
|
| 266 |
+
print("Tokenizer loaded successfully.")
|
| 267 |
+
|
| 268 |
+
# Define output directory based on model and dataset
|
| 269 |
+
if 'qwq' in model_path.lower():
|
| 270 |
+
if dataset_name in ['math500', 'gpqa', 'aime', 'amc', 'livecode']:
|
| 271 |
+
output_dir = f'./outputs/{dataset_name}.qwq.search_o1'
|
| 272 |
+
if dataset_name == 'gpqa' and (MAX_SEARCH_LIMIT != 5 or top_k != 10):
|
| 273 |
+
output_dir = f'./outputs/runs.analysis/{dataset_name}.qwq.search_o1.{MAX_SEARCH_LIMIT}.{top_k}'
|
| 274 |
+
else:
|
| 275 |
+
output_dir = f'./outputs/runs.qa/{dataset_name}.qwq.search_o1'
|
| 276 |
+
else:
|
| 277 |
+
model_short_name = model_path.split('/')[-1].lower().replace('-instruct', '')
|
| 278 |
+
output_dir = f'./outputs/runs.baselines/{dataset_name}.{model_short_name}.search_o1'
|
| 279 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 280 |
+
|
| 281 |
+
print(f"Loading model from {model_path}...")
|
| 282 |
+
print(f"device_count: {torch.cuda.device_count()}")
|
| 283 |
+
# Initialize the LLM
|
| 284 |
+
llm = LLM(
|
| 285 |
+
model=model_path,
|
| 286 |
+
tensor_parallel_size=torch.cuda.device_count(),
|
| 287 |
+
gpu_memory_utilization=0.95,
|
| 288 |
+
)
|
| 289 |
+
print("Model loaded successfully.")
|
| 290 |
+
|
| 291 |
+
# ---------------------- Data Loading ----------------------
|
| 292 |
+
print(f"Loading data from {data_path}...")
|
| 293 |
+
with open(data_path, 'r', encoding='utf-8') as json_file:
|
| 294 |
+
filtered_data = json.load(json_file)
|
| 295 |
+
print(f"Data loaded successfully. Total examples: {len(filtered_data)}")
|
| 296 |
+
|
| 297 |
+
# ---------------------- Batch Generation Function ----------------------
|
| 298 |
+
def generate_webpage_to_reasonchain_batch(
    original_questions: List[str],
    prev_reasonings: List[str],
    search_queries: List[str],
    documents: List[str],
    dataset_name: str,
    batch_output_records: List[Dict],  # collects prompt/raw/extracted records
    max_tokens: int = 32768,
    coherent: bool = False,
) -> List[str]:
    """Condense fetched web documents into reasoning snippets, one per item.

    For each (previous reasoning, search query, document) triple, an
    instruction prompt is built, rendered through the chat template, and
    sent to the LLM in a single batch. The model's raw replies are reduced
    with ``extract_answer(..., mode='infogen')`` to the information snippet
    that will be fed back into the main reasoning loop.

    Side effects: appends one record per item (prompt, raw output,
    extracted info) to ``batch_output_records``.

    Returns the list of extracted information strings, aligned with the
    input lists.
    """
    # Build one fully rendered chat prompt per (reasoning, query, document).
    rendered_prompts = []
    for reasoning, query, document in zip(prev_reasonings, search_queries, documents):
        instruction = get_webpage_to_reasonchain_instruction(reasoning, query, document)
        message = {"role": "user", "content": instruction}
        rendered_prompts.append(
            tokenizer.apply_chat_template([message], tokenize=False, add_generation_prompt=True)
        )

    # One batched generation call; sampling values are fixed for this stage.
    generation = llm.generate(
        rendered_prompts,
        sampling_params=SamplingParams(
            max_tokens=max_tokens,
            temperature=0.7,
            top_p=0.8,
            top_k=20,
            repetition_penalty=1.05,
        ),
    )

    raw_texts = [candidate.outputs[0].text for candidate in generation]
    # Pull out only the distilled information from each raw model reply.
    condensed_infos = [extract_answer(text, mode='infogen') for text in raw_texts]

    # Record every prompt/response pair for later inspection.
    for rendered, raw_text, info in zip(rendered_prompts, raw_texts, condensed_infos):
        batch_output_records.append({
            'prompt': rendered,
            'raw_output': raw_text,
            'extracted_info': info,
        })

    return condensed_infos
|
| 338 |
+
|
| 339 |
+
# ---------------------- Preparation of Input Prompts ----------------------
|
| 340 |
+
input_list = []
|
| 341 |
+
for item in filtered_data: # 生成prompts
|
| 342 |
+
question = item['Question']
|
| 343 |
+
|
| 344 |
+
if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 345 |
+
if dataset_name in ['nq', 'triviaqa']:
|
| 346 |
+
instruction = get_singleqa_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 347 |
+
elif dataset_name in ['hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 348 |
+
instruction = get_multiqa_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 349 |
+
if 'qwq' in model_path.lower():
|
| 350 |
+
user_prompt = get_task_instruction_openqa(question, model_name='qwq')
|
| 351 |
+
else:
|
| 352 |
+
user_prompt = get_task_instruction_openqa(question)
|
| 353 |
+
|
| 354 |
+
elif dataset_name in ['math500', 'aime', 'amc']:
|
| 355 |
+
instruction = get_math_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 356 |
+
if 'qwq' in model_path.lower():
|
| 357 |
+
user_prompt = get_task_instruction_math(question, model_name='qwq')
|
| 358 |
+
else:
|
| 359 |
+
user_prompt = get_task_instruction_math(question)
|
| 360 |
+
|
| 361 |
+
elif dataset_name == 'gpqa':
|
| 362 |
+
instruction = get_gpqa_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 363 |
+
if 'qwq' in model_path.lower():
|
| 364 |
+
user_prompt = get_task_instruction_multi_choice(question, model_name='qwq')
|
| 365 |
+
elif 'llama' in model_path.lower():
|
| 366 |
+
user_prompt = get_task_instruction_multi_choice(question, model_name='llama')
|
| 367 |
+
else:
|
| 368 |
+
user_prompt = get_task_instruction_multi_choice(question)
|
| 369 |
+
|
| 370 |
+
elif dataset_name == 'livecode':
|
| 371 |
+
instruction = get_code_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 372 |
+
question_title = item.get('question_title', '')
|
| 373 |
+
if 'qwq' in model_path.lower():
|
| 374 |
+
user_prompt = get_task_instruction_code(question, question_title=question_title, model_name='qwq')
|
| 375 |
+
else:
|
| 376 |
+
user_prompt = get_task_instruction_code(question)
|
| 377 |
+
else:
|
| 378 |
+
user_prompt = "" # Default to empty if dataset not matched
|
| 379 |
+
|
| 380 |
+
prompt = [{"role": "user", "content": instruction + user_prompt}] # instruction是告诉模型怎么进行搜索,user_prompt是用户具体问题
|
| 381 |
+
prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
|
| 382 |
+
input_list.append(prompt)
|
| 383 |
+
|
| 384 |
+
if subset_num != -1:
|
| 385 |
+
input_list = input_list[:subset_num]
|
| 386 |
+
filtered_data = filtered_data[:subset_num]
|
| 387 |
+
|
| 388 |
+
# Initialize active sequences
|
| 389 |
+
active_sequences = [{ # 记录每个问题的搜索历史
|
| 390 |
+
'item': item,
|
| 391 |
+
'prompt': prompt,
|
| 392 |
+
'output': '',
|
| 393 |
+
'finished': False, # 一开始均为未完成
|
| 394 |
+
'history': [],
|
| 395 |
+
'search_count': 0,
|
| 396 |
+
'executed_search_queries': set(),
|
| 397 |
+
'all_info': [],
|
| 398 |
+
} for item, prompt in zip(filtered_data, input_list)]
|
| 399 |
+
|
| 400 |
+
# ---------------------- Set Max Tokens ----------------------
|
| 401 |
+
if 'qwq' in model_path.lower():
|
| 402 |
+
if dataset_name in ['aime', 'amc', 'livecode']:
|
| 403 |
+
max_tokens = 32768
|
| 404 |
+
else:
|
| 405 |
+
max_tokens = 20480
|
| 406 |
+
else:
|
| 407 |
+
max_tokens = 8192
|
| 408 |
+
|
| 409 |
+
# ---------------------- Generation Function ----------------------
|
| 410 |
+
def run_generation(sequences: List[Dict], max_tokens: int) -> List:
    """Generate the next reasoning segment for every sequence in a batch.

    Each sequence dict supplies its accumulated ``'prompt'``. Generation
    stops either at the end-of-search-query tag (kept in the output so the
    caller can detect it) or at the tokenizer's EOS token.

    Returns the vLLM output objects, aligned with ``sequences``.
    """
    decoding_params = SamplingParams(
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k_sampling,
        repetition_penalty=repetition_penalty,
        # Stop at the search-query terminator so the loop can run the search;
        # keep the stop string so it remains visible in the generated text.
        stop=[END_SEARCH_QUERY, tokenizer.eos_token],
        include_stop_str_in_output=True,
    )
    pending_prompts = [entry['prompt'] for entry in sequences]
    return llm.generate(pending_prompts, sampling_params=decoding_params)
|
| 423 |
+
|
| 424 |
+
# Function to extract text between two tags 提取位于 start_tag 和 end_tag 之间的内容
|
| 425 |
+
def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
    """Return the stripped content of the LAST start_tag...end_tag span.

    Matching is non-greedy and spans newlines (DOTALL). Returns None when
    no complete tag pair is present in ``text``.
    """
    tag_pattern = re.compile(
        re.escape(start_tag) + r"(.*?)" + re.escape(end_tag), flags=re.DOTALL
    )
    last_hit = None
    for hit in tag_pattern.finditer(text):
        last_hit = hit  # keep only the final occurrence
    if last_hit is None:
        return None
    return last_hit.group(1).strip()
|
| 431 |
+
|
| 432 |
+
def replace_recent_steps(origin_str, replace_str):
    """Merge revised reasoning steps into the original reasoning chain.

    Steps are identified by ``Step <n>:`` headers at the start of a line.
    Every step parsed from ``replace_str`` overwrites the step with the same
    number in ``origin_str``; a replacement whose body contains
    "DELETE THIS STEP" removes that step instead. Surviving steps are
    re-joined in ascending numeric order, separated by blank lines.

    Parameters:
    - origin_str (str): The original reasoning steps.
    - replace_str (str): The steps to replace or delete.

    Returns:
    - str: The updated reasoning steps after applying replacements.
    """
    # Header form: "Step", whitespace, a step number, then a colon.
    header_re = re.compile(r"Step\s+(\d+):\s*")

    def parse_steps(text):
        """Map each step number to its body text.

        Lines before the first header are discarded; each body is the
        header remainder plus any following non-header lines, stripped of
        surrounding whitespace.
        """
        numbered = {}
        active_num = None
        active_lines = []
        for raw_line in text.splitlines():
            header = header_re.match(raw_line)
            if header is None:
                # Continuation line: belongs to the step currently open.
                if active_num is not None:
                    active_lines.append(raw_line)
                continue
            # New header: close out the previous step before starting this one.
            if active_num is not None:
                numbered[active_num] = "\n".join(active_lines).strip()
            active_num = int(header.group(1))
            remainder = raw_line[header.end():].strip()
            active_lines = [remainder] if remainder else []
        if active_num is not None:
            numbered[active_num] = "\n".join(active_lines).strip()
        return numbered

    merged = parse_steps(origin_str)
    for number, body in parse_steps(replace_str).items():
        if "DELETE THIS STEP" in body:
            merged.pop(number, None)  # delete only if the step exists
        else:
            merged[number] = body  # replace an existing step or add a new one

    # Rebuild the chain in step order; bodies separated by blank lines.
    return "\n\n".join(body for _, body in sorted(merged.items()))
|
| 503 |
+
|
| 504 |
+
# ---------------------- Initialize Collection Structure ----------------------
|
| 505 |
+
# Initialize a list to collect batch outputs
|
| 506 |
+
batch_output_records = []
|
| 507 |
+
|
| 508 |
+
start_time = time.time()
|
| 509 |
+
turn = 0
|
| 510 |
+
|
| 511 |
+
# 流程
|
| 512 |
+
# 首先根据prompt让模型生成回复
|
| 513 |
+
# 从模型的回复中提取搜索查询
|
| 514 |
+
# 如果有(回复要以END_SEARCH_QUERY结尾)
|
| 515 |
+
# 根据搜索查询,从互联网上提取相关信息
|
| 516 |
+
# 处理查询的信息
|
| 517 |
+
# 让模型基于之前的步骤,检索query和查询到的信息生成新的推理,得到search result
|
| 518 |
+
# 然后回到第一步(这里模型就会根据前面的search result,再次生成新的回复
|
| 519 |
+
# 如果没有查询则该条问题结束
|
| 520 |
+
|
| 521 |
+
# Main loop until all sequences are finished or maximum turns reached
|
| 522 |
+
while True:
|
| 523 |
+
# Identify sequences that need generation
|
| 524 |
+
sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']] # 筛选出需要生成的新内容的序列,active_sequences 是一个包含所有活跃序列的列表
|
| 525 |
+
|
| 526 |
+
if sequences_needing_generation:
|
| 527 |
+
turn += 1
|
| 528 |
+
print(f'\n-------------- Turn {turn} --------------')
|
| 529 |
+
print(f"We have {len(sequences_needing_generation)} sequences needing generation...")
|
| 530 |
+
outputs = run_generation(sequences_needing_generation, max_tokens) # 根据prompt
|
| 531 |
+
print("Generation completed, processing outputs...")
|
| 532 |
+
|
| 533 |
+
# Initialize batch variables
|
| 534 |
+
batch_relevant_info = []
|
| 535 |
+
batch_original_questions = []
|
| 536 |
+
batch_prev_reasonings = []
|
| 537 |
+
batch_search_queries = []
|
| 538 |
+
batch_documents = []
|
| 539 |
+
batch_sequences = []
|
| 540 |
+
|
| 541 |
+
# Collect URLs to fetch across all sequences
|
| 542 |
+
all_urls_to_fetch = set() # 初始化一个集合 all_urls_to_fetch 用来收集所有需要获取的 URL
|
| 543 |
+
url_snippets = {}
|
| 544 |
+
url_sequence_map = {} # Map URL to list of sequences needing it
|
| 545 |
+
|
| 546 |
+
# Process each sequence and collect URLs
|
| 547 |
+
for seq, out in zip(sequences_needing_generation, outputs): # 遍历需要生成新内容的序列,并生成新内容,同时收集需要获取的 URL
|
| 548 |
+
text = out.outputs[0].text # 将生成的文本添加到序列的历史记录、提示和输出中
|
| 549 |
+
seq['history'].append(text)
|
| 550 |
+
# Append generated text to prompt and output
|
| 551 |
+
seq['prompt'] += text
|
| 552 |
+
seq['output'] += text
|
| 553 |
+
seq['all_info'].append({f"turn_{turn}_reason": text})
|
| 554 |
+
# Extract search query
|
| 555 |
+
search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY) # 提取搜索查询
|
| 556 |
+
|
| 557 |
+
# If a search query is present and needs to be executed
|
| 558 |
+
if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
|
| 559 |
+
if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
|
| 560 |
+
# Execute search, use cache if available
|
| 561 |
+
if search_query in search_cache:
|
| 562 |
+
results = search_cache[search_query] # 如果搜索查询结果在缓存中存在,则从缓存中获取结果
|
| 563 |
+
print(f"Using cached search results for query: \"{search_query}\"")
|
| 564 |
+
else:
|
| 565 |
+
try:
|
| 566 |
+
results = bing_web_search(search_query, bing_subscription_key, bing_endpoint, market='en-US', language='en') # 执行搜索
|
| 567 |
+
search_cache[search_query] = results # 将搜索结果添加到缓存中
|
| 568 |
+
print(f"Executed and cached search for query: \"{search_query}\"")
|
| 569 |
+
except Exception as e:
|
| 570 |
+
print(f"Error during search query '{search_query}': {e}")
|
| 571 |
+
search_cache[search_query] = {}
|
| 572 |
+
results = {}
|
| 573 |
+
|
| 574 |
+
# Extract relevant information from Bing search results
|
| 575 |
+
relevant_info = extract_relevant_info(results)[:top_k] # 从搜索结果中提取出最相关的信息
|
| 576 |
+
seq['relevant_info'] = relevant_info
|
| 577 |
+
|
| 578 |
+
# Extract URLs and snippets
|
| 579 |
+
urls_to_fetch = [it['url'] for it in relevant_info] # 从搜索结果中提取出所有 URL
|
| 580 |
+
snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info} # 创建一个字典 snippets,将 URL 映射到它们对应的片段(snippet)。如果 snippet 字段存在,则把它加入字典
|
| 581 |
+
|
| 582 |
+
# Filter URLs that are not cached
|
| 583 |
+
urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache] # 筛选出所有没有被缓存的 UR
|
| 584 |
+
cached_urls = [u for u in urls_to_fetch if u in url_cache] # 选出已经缓存的 URL(即在 url_cache 中存在的 URL)。这些 URL 已经被处理过,不需要再次请求
|
| 585 |
+
|
| 586 |
+
# Store info for all_urls_to_fetch and url_snippets
|
| 587 |
+
for url in urls_to_fetch_filtered:
|
| 588 |
+
all_urls_to_fetch.add(url)
|
| 589 |
+
url_snippets[url] = snippets.get(url, "") # 将每个 URL 对应的片段存储到 url_snippets 字典中
|
| 590 |
+
|
| 591 |
+
all_reasoning_steps = seq['output']
|
| 592 |
+
all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n") # 将连续的空行(\n\n)替换为单个换行符(\n),然后按换行符拆分文本,得到每个推理步骤单独的一行
|
| 593 |
+
|
| 594 |
+
truncated_prev_reasoning = ""
|
| 595 |
+
for i, step in enumerate(all_reasoning_steps):
|
| 596 |
+
truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n" # 遍历 all_reasoning_steps 中的每个步骤,并将每个步骤编号和步骤内容格式化后,添加到 truncated_prev_reasoning 字符串中。这样生成一个包含步骤编号和内容的字符串
|
| 597 |
+
|
| 598 |
+
prev_steps = truncated_prev_reasoning.split('\n\n') # 将推理步骤字符串 truncated_prev_reasoning 按照每两个换行符拆分成多个步骤
|
| 599 |
+
if len(prev_steps) <= 5: # 如果步骤的数量不超过 5,直接保留所有步骤
|
| 600 |
+
truncated_prev_reasoning = '\n\n'.join(prev_steps)
|
| 601 |
+
else:
|
| 602 |
+
truncated_prev_reasoning = ''
|
| 603 |
+
for i, step in enumerate(prev_steps): # 如果步骤超过 5 个,进行截断。保留第一个步骤、最后四个步骤以及包含搜索查询或搜索结果的步骤。如果中间的步骤不重要,则用 ... 来表示省略
|
| 604 |
+
if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
|
| 605 |
+
truncated_prev_reasoning += step + '\n\n'
|
| 606 |
+
else: # 如果步骤超过 5 个,进行截断。保留第一个步骤、最后四个步骤以及包含搜索查询或搜索结果的步骤。如果中间的步骤不重要,则用 ... 来表示省略
|
| 607 |
+
if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
|
| 608 |
+
truncated_prev_reasoning += '...\n\n'
|
| 609 |
+
truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
|
| 610 |
+
|
| 611 |
+
# Collect parameters for batch processing
|
| 612 |
+
batch_relevant_info.append(relevant_info) # 搜索出来的信息
|
| 613 |
+
batch_original_questions.append(seq['item']['Question']) # 原始问题
|
| 614 |
+
batch_prev_reasonings.append(truncated_prev_reasoning) # 之前的推理步骤
|
| 615 |
+
batch_search_queries.append(search_query) # 搜索查询
|
| 616 |
+
batch_sequences.append(seq)
|
| 617 |
+
|
| 618 |
+
# Update search count and executed queries
|
| 619 |
+
seq['search_count'] += 1 # 更新搜索计数
|
| 620 |
+
seq['executed_search_queries'].add(search_query) # 将已执行的搜索查询添加到集合中
|
| 621 |
+
|
| 622 |
+
elif seq['search_count'] >= MAX_SEARCH_LIMIT: # 如果搜索次数达到或超过该限制,则返回一条消息,通知该查询无法再进行
|
| 623 |
+
limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
|
| 624 |
+
seq['prompt'] += limit_message
|
| 625 |
+
seq['output'] += limit_message
|
| 626 |
+
seq['history'].append(limit_message)
|
| 627 |
+
seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
|
| 628 |
+
print(f"Search limit reached for query: \"{search_query}\"")
|
| 629 |
+
|
| 630 |
+
elif search_query in seq['executed_search_queries']: # 如果当前查询已经执行过,则返回一个消息,提示用户查询已重复,并引导其查看之前的结果
|
| 631 |
+
limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
|
| 632 |
+
seq['prompt'] += limit_message
|
| 633 |
+
seq['output'] += limit_message
|
| 634 |
+
seq['history'].append(limit_message)
|
| 635 |
+
seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
|
| 636 |
+
print(f"Repeated search for query: \"{search_query}\"")
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
else: # 如果当前序列没有需要执行的搜索查询,则将该序列标记为完成,并打印提示信息
|
| 640 |
+
# If no search query needs to be executed, mark the sequence as finished
|
| 641 |
+
seq['finished'] = True
|
| 642 |
+
print("Sequence marked as complete.")
|
| 643 |
+
|
| 644 |
+
print(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
|
| 645 |
+
print(f"all_urls_to_fetch: {all_urls_to_fetch}")
|
| 646 |
+
# Batch fetch all URLs at once to optimize speed
|
| 647 |
+
|
| 648 |
+
if all_urls_to_fetch:
|
| 649 |
+
print(f"Fetching {len(all_urls_to_fetch)} URLs...")
|
| 650 |
+
try:
|
| 651 |
+
fetched_contents = fetch_page_content( # 一次性获取所有 URL 的中搜索出来的内容
|
| 652 |
+
list(all_urls_to_fetch),
|
| 653 |
+
use_jina=use_jina,
|
| 654 |
+
jina_api_key=jina_api_key,
|
| 655 |
+
# snippets=url_snippets # Do not pass snippets when updating url_cache directly
|
| 656 |
+
)
|
| 657 |
+
print(f"Fetched {len(fetched_contents)} URLs successfully.")
|
| 658 |
+
except Exception as e:
|
| 659 |
+
print(f"Error during batch URL fetching: {e}")
|
| 660 |
+
fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
|
| 661 |
+
# Update cache with fetched contents
|
| 662 |
+
for url, content in fetched_contents.items(): # 将获取的内容添加到 url_cache 中
|
| 663 |
+
url_cache[url] = content
|
| 664 |
+
|
| 665 |
+
# After fetching, prepare formatted documents for batch processing
|
| 666 |
+
for relevant_info in batch_relevant_info:
|
| 667 |
+
formatted_documents = "" # 初始化一个空字符串 formatted_documents,用于拼接本次要处理的所有网页信息。后面会将其添加到 batch_documents 列表中
|
| 668 |
+
for i, doc_info in enumerate(relevant_info):
|
| 669 |
+
url = doc_info['url']
|
| 670 |
+
raw_context = url_cache.get(url, "") # 获取 url 对应的内容
|
| 671 |
+
doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
|
| 672 |
+
success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
|
| 673 |
+
if success:
|
| 674 |
+
context = filtered_context
|
| 675 |
+
else: # 否则就取 raw_context 的前 max_doc_len * 2 个字符,作为一个有限的片段,避免过长导致后续处理负担
|
| 676 |
+
context = raw_context[:max_doc_len*2]
|
| 677 |
+
|
| 678 |
+
doc_info['context'] = context
|
| 679 |
+
formatted_documents += f"**Web Page {i + 1}:**\n"
|
| 680 |
+
formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
|
| 681 |
+
|
| 682 |
+
batch_documents.append(formatted_documents) # 将本组搜索结果的所有信息(拼接成的字符串 formatted_documents)添加到 batch_documents 列表中
|
| 683 |
+
|
| 684 |
+
# After fetching, prepare for batch processing if there are any
|
| 685 |
+
if batch_sequences:
|
| 686 |
+
print(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
|
| 687 |
+
webpage_analyses = generate_webpage_to_reasonchain_batch( # 根据前面处理好的结果,生成新的推理
|
| 688 |
+
original_questions=batch_original_questions,
|
| 689 |
+
prev_reasonings=batch_prev_reasonings,
|
| 690 |
+
search_queries=batch_search_queries,
|
| 691 |
+
documents=batch_documents,
|
| 692 |
+
dataset_name=dataset_name,
|
| 693 |
+
batch_output_records=batch_output_records, # Pass the collection list
|
| 694 |
+
max_tokens=max_tokens,
|
| 695 |
+
)
|
| 696 |
+
print("Batch generation completed, assigning outputs to sequences...")
|
| 697 |
+
|
| 698 |
+
for seq, analysis,doc in zip(batch_sequences, webpage_analyses, batch_documents): # 遍历批处理返回的 webpage_analyses,将处理结果与相应的序列 seq 进行一一对应
|
| 699 |
+
if isinstance(analysis, str): # 判断 analysis 是否是纯字符串。如果是字符串,说明直接可以追加到序列的文本中
|
| 700 |
+
append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n" # 封装处理结果,添加到序列的历史记录、提示和输出中
|
| 701 |
+
seq['prompt'] += append_text
|
| 702 |
+
seq['output'] += append_text
|
| 703 |
+
seq['history'].append(append_text) # 存的是每一次的webpage_analyses
|
| 704 |
+
seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
|
| 705 |
+
else: # 如果 analysis 不是纯字符串,那么可能是一种特殊的结构,比如表示需要替换推理步骤的 dict 或其他格式
|
| 706 |
+
append_text = replace_recent_steps(seq['output'], analysis)
|
| 707 |
+
seq['prompt'] += append_text
|
| 708 |
+
seq['output'] += append_text
|
| 709 |
+
seq['history'].append(append_text)
|
| 710 |
+
seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
|
| 711 |
+
|
| 712 |
+
# Check if all sequences are finished
|
| 713 |
+
# 保存active_sequences
|
| 714 |
+
active_sequences_part = [{ # 记录每个问题的搜索历史
|
| 715 |
+
'item': ele["item"],
|
| 716 |
+
'prompt': ele['prompt'],
|
| 717 |
+
'output': ele["output"],
|
| 718 |
+
'finished': ele["finished"], # 一开始均为未完成
|
| 719 |
+
'history':ele["history"],
|
| 720 |
+
'search_count': ele["search_count"],
|
| 721 |
+
'all_info': ele['all_info']
|
| 722 |
+
} for ele in active_sequences]
|
| 723 |
+
with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
|
| 724 |
+
json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
|
| 725 |
+
unfinished = [seq for seq in active_sequences if not seq['finished']] # 是否结束是基于模型是否生成了新的搜索
|
| 726 |
+
if not unfinished:
|
| 727 |
+
break
|
| 728 |
+
else:
|
| 729 |
+
if turn >= MAX_TURN:
|
| 730 |
+
print(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
|
| 731 |
+
break
|
| 732 |
+
|
| 733 |
+
total_time = time.time() - start_time
|
| 734 |
+
print(f"Total time taken: {total_time} seconds")
|
| 735 |
+
|
| 736 |
+
# ---------------------- Save Batch Output Records to JSON File ----------------------
|
| 737 |
+
# Define output JSON file path
|
| 738 |
+
t = time.localtime()
|
| 739 |
+
batch_output_file = os.path.join(output_dir, f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')
|
| 740 |
+
|
| 741 |
+
# Save batch_output_records to JSON file
|
| 742 |
+
with open(batch_output_file, 'w', encoding='utf-8') as f: # 这里存的是webpage推理时的输入和输出和提取后的信息
|
| 743 |
+
json.dump(batch_output_records, f, ensure_ascii=False, indent=2)
|
| 744 |
+
|
| 745 |
+
print(f"Batch outputs saved to {batch_output_file}")
|
| 746 |
+
|
| 747 |
+
# Prepare output list for evaluation
|
| 748 |
+
output_list = [seq['output'] for seq in active_sequences]
|
| 749 |
+
|
| 750 |
+
# Run evaluation
|
| 751 |
+
run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)
|
| 752 |
+
|
| 753 |
+
# ---------------------- Update Search and URL Cache ----------------------
|
| 754 |
+
print('Updating Search and URL Cache...')
|
| 755 |
+
# Load existing caches or initialize empty dictionaries
|
| 756 |
+
if os.path.exists(search_cache_path):
|
| 757 |
+
with open(search_cache_path, 'r', encoding='utf-8') as f:
|
| 758 |
+
search_cache_new = json.load(f)
|
| 759 |
+
else:
|
| 760 |
+
search_cache_new = {}
|
| 761 |
+
|
| 762 |
+
if os.path.exists(url_cache_path):
|
| 763 |
+
with open(url_cache_path, 'r', encoding='utf-8') as f:
|
| 764 |
+
url_cache_new = json.load(f)
|
| 765 |
+
else:
|
| 766 |
+
url_cache_new = {}
|
| 767 |
+
|
| 768 |
+
search_cache.update(search_cache_new)
|
| 769 |
+
url_cache.update(url_cache_new)
|
| 770 |
+
|
| 771 |
+
save_caches()
|
| 772 |
+
|
| 773 |
+
print("Process completed.")
|
| 774 |
+
|
| 775 |
+
# Script entry point: run the full search-and-reason pipeline only when this
# file is executed directly, not when it is imported as a module.
if __name__ == "__main__":
    main()
|
deep_search/search_o1/scripts/run_search_o1_test.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# run_search_o1.py
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
import re
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import string
|
| 10 |
+
from typing import Optional, Tuple, List, Dict
|
| 11 |
+
import argparse
|
| 12 |
+
|
| 13 |
+
from transformers import AutoTokenizer
|
| 14 |
+
from vllm import LLM, SamplingParams
|
| 15 |
+
|
| 16 |
+
from bing_search import (
|
| 17 |
+
bing_web_search,
|
| 18 |
+
extract_relevant_info,
|
| 19 |
+
fetch_page_content,
|
| 20 |
+
extract_snippet_with_context
|
| 21 |
+
)
|
| 22 |
+
from evaluate import (
|
| 23 |
+
run_evaluation,
|
| 24 |
+
extract_answer
|
| 25 |
+
)
|
| 26 |
+
from prompts import (
|
| 27 |
+
get_gpqa_search_o1_instruction,
|
| 28 |
+
get_math_search_o1_instruction,
|
| 29 |
+
get_code_search_o1_instruction,
|
| 30 |
+
get_singleqa_search_o1_instruction,
|
| 31 |
+
get_multiqa_search_o1_instruction,
|
| 32 |
+
get_webpage_to_reasonchain_instruction,
|
| 33 |
+
get_task_instruction_openqa,
|
| 34 |
+
get_task_instruction_math,
|
| 35 |
+
get_task_instruction_multi_choice,
|
| 36 |
+
get_task_instruction_code,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
# Special control tokens that delimit a search query emitted by the model
# and the search result injected back into its reasoning chain.
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
|
| 44 |
+
|
| 45 |
+
def parse_args():
    """Build and parse the command-line arguments for a Search-O1 test run.

    Returns:
        argparse.Namespace carrying dataset/split selection, search and
        document-retrieval limits, model path, sampling parameters, and
        Bing Search API credentials.
    """
    ap = argparse.ArgumentParser(description="Run Search O1 for various datasets and models.")

    # --- Dataset and split configuration ---
    ap.add_argument('--dataset_name', type=str, required=True,
                    choices=['gpqa', 'math500', 'aime', 'amc', 'livecode', 'nq', 'triviaqa', 'hotpotqa', '2wiki', 'musique', 'bamboogle'],
                    help="Name of the dataset to use.")
    ap.add_argument('--split', type=str, required=True,
                    choices=['test', 'diamond', 'main', 'extended'],
                    help="Dataset split to use.")
    ap.add_argument('--subset_num', type=int, default=-1,
                    help="Number of examples to process. Defaults to all if not specified.")

    # --- Search and document retrieval configuration ---
    ap.add_argument('--max_search_limit', type=int, default=10,
                    help="Maximum number of searches per question.")
    ap.add_argument('--max_turn', type=int, default=15,
                    help="Maximum number of turns.")
    # Upper bound on documents returned per search-engine call.
    ap.add_argument('--top_k', type=int, default=10,
                    help="Maximum number of search documents to return.")
    ap.add_argument('--max_doc_len', type=int, default=3000,
                    help="Maximum length of each searched document.")
    ap.add_argument('--use_jina', action='store_true',
                    help="Whether to use Jina API for document fetching.")
    # NOTE: the default is the literal string 'None', not the None object;
    # downstream code appears to rely on this sentinel, so it is kept as-is.
    ap.add_argument('--jina_api_key', type=str, default='None',
                    help="Your Jina API Key to Fetch URL Content.")

    # --- Model configuration ---
    ap.add_argument('--model_path', type=str, required=True,
                    help="Path to the pre-trained model.")

    # --- Sampling parameters ---
    ap.add_argument('--temperature', type=float, default=0.7,
                    help="Sampling temperature.")
    ap.add_argument('--top_p', type=float, default=0.8,
                    help="Top-p sampling parameter.")
    ap.add_argument('--top_k_sampling', type=int, default=20,
                    help="Top-k sampling parameter.")
    ap.add_argument('--repetition_penalty', type=float, default=None,
                    help="Repetition penalty. If not set, defaults based on the model.")
    ap.add_argument('--max_tokens', type=int, default=32768,
                    help="Maximum number of tokens to generate. If not set, defaults based on the model and dataset.")

    # --- Bing API configuration ---
    ap.add_argument('--bing_subscription_key', type=str, required=True,
                    help="Bing Search API subscription key.")
    ap.add_argument('--bing_endpoint', type=str, default="https://api.bing.microsoft.com/v7.0/search",
                    help="Bing Search API endpoint.")

    return ap.parse_args()
|
| 176 |
+
|
| 177 |
+
def main():
    """Parse the CLI arguments and echo the Jina-fetching configuration.

    This is a trimmed-down test entry point: it only unpacks the parsed
    arguments into local variables (mirroring the naming used by the full
    run_search_o1 script) and prints a couple of them for inspection.
    """
    args = parse_args()
    print(f"args.use_jina: {args.use_jina}")

    # Unpack every CLI argument into a local, keeping the same variable
    # names as the full pipeline so the two scripts stay comparable.
    dataset_name = args.dataset_name
    split = args.split
    subset_num = args.subset_num
    MAX_SEARCH_LIMIT = args.max_search_limit
    MAX_TURN = args.max_turn
    top_k = args.top_k
    max_doc_len = args.max_doc_len
    model_path = args.model_path
    temperature = args.temperature
    top_p = args.top_p
    top_k_sampling = args.top_k_sampling
    repetition_penalty = args.repetition_penalty
    max_tokens = args.max_tokens
    bing_subscription_key = args.bing_subscription_key
    bing_endpoint = args.bing_endpoint
    use_jina = args.use_jina
    jina_api_key = args.jina_api_key
    print(f"use_jina: {use_jina}")


if __name__ == "__main__":
    main()
|
deep_search/search_o1/scripts/search_o1_sum_all_webpage.py
ADDED
|
@@ -0,0 +1,982 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# run_search_o1.py
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
import re
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import string
|
| 10 |
+
from typing import Optional, Tuple, List, Dict
|
| 11 |
+
import argparse
|
| 12 |
+
|
| 13 |
+
from transformers import AutoTokenizer
|
| 14 |
+
from vllm import LLM, SamplingParams
|
| 15 |
+
|
| 16 |
+
from bing_search import (
|
| 17 |
+
bing_web_search,
|
| 18 |
+
extract_relevant_info,
|
| 19 |
+
fetch_page_content,
|
| 20 |
+
extract_snippet_with_context
|
| 21 |
+
)
|
| 22 |
+
from evaluate import (
|
| 23 |
+
run_evaluation,
|
| 24 |
+
run_evaluation_for_eval,
|
| 25 |
+
extract_answer
|
| 26 |
+
)
|
| 27 |
+
from prompts_step_by_step import (
|
| 28 |
+
get_gpqa_search_o1_instruction,
|
| 29 |
+
get_math_search_o1_instruction,
|
| 30 |
+
get_code_search_o1_instruction,
|
| 31 |
+
get_singleqa_search_o1_instruction,
|
| 32 |
+
get_multiqa_search_o1_instruction,
|
| 33 |
+
get_webpage_to_reasonchain_instruction,
|
| 34 |
+
get_task_instruction_openqa,
|
| 35 |
+
get_task_instruction_math,
|
| 36 |
+
get_task_instruction_multi_choice,
|
| 37 |
+
get_task_instruction_code,
|
| 38 |
+
get_singleqa_search_o1_instruction_1,
|
| 39 |
+
get_multiqa_search_o1_instruction_1,
|
| 40 |
+
get_webpage_to_reasonchain_instruction_1,
|
| 41 |
+
get_math_search_o1_instruction_1,
|
| 42 |
+
get_multiqa_search_o1_instruction_4,
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
from openai import OpenAI
|
| 47 |
+
|
| 48 |
+
from add_eval import add_eval
|
| 49 |
+
# Modify OpenAI's API key and API base to use vLLM's API server.
|
| 50 |
+
# 使用 vLLM 的 API 服务器需要修改 OpenAI 的 API 密钥和 API 库。
|
| 51 |
+
|
| 52 |
+
# openai_api_key = "EMPTY"
|
| 53 |
+
# openai_api_base = "http://localhost:8000/v1"
|
| 54 |
+
# client = OpenAI(
|
| 55 |
+
# api_key=openai_api_key,
|
| 56 |
+
# base_url=openai_api_base,
|
| 57 |
+
# )
|
| 58 |
+
|
| 59 |
+
# Control tokens wrapping the model's emitted search queries and the
# injected search results inside its reasoning chain.
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# os.environ['http_proxy'] = 'http://127.0.0.1:7880'
|
| 67 |
+
# os.environ['https_proxy'] = 'http://127.0.0.1:7880'
|
| 68 |
+
|
| 69 |
+
# NOTE: this variant adds shared search/URL caching, a "has answer" evaluation pass, and document truncation.
|
| 70 |
+
|
| 71 |
+
def parse_args():
    """Build and parse the command-line arguments for the Search-O1 run.

    Compared with the base script, this variant drops the fixed ``choices``
    lists on ``--dataset_name``/``--split`` (any string is accepted) and
    adds cache/output directory paths plus URL-exclusion control.

    Returns:
        argparse.Namespace with dataset selection, search limits, model
        path, sampling parameters, Bing API credentials, and I/O paths.
    """
    parser = argparse.ArgumentParser(description="Run Search O1 for various datasets and models.")

    # --- Dataset and split configuration ---
    # No `choices` restriction: callers pass custom dataset/split names.
    parser.add_argument(
        '--dataset_name',
        type=str,
        required=True,
        help="Name of the dataset to use."
    )
    parser.add_argument(
        '--split',
        type=str,
        required=True,
        help="Dataset split to use."
    )
    parser.add_argument(
        '--subset_num',
        type=int,
        default=-1,
        help="Number of examples to process. Defaults to all if not specified."
    )

    # --- Search and document retrieval configuration ---
    parser.add_argument(
        '--max_search_limit',
        type=int,
        default=10,
        help="Maximum number of searches per question."
    )
    parser.add_argument(
        '--max_turn',
        type=int,
        default=15,
        help="Maximum number of turns."
    )
    # Upper bound on documents returned per search-engine call.
    parser.add_argument(
        '--top_k',
        type=int,
        default=10,
        help="Maximum number of search documents to return."
    )
    parser.add_argument(
        '--max_doc_len',
        type=int,
        default=3000,
        help="Maximum length of each searched document."
    )
    parser.add_argument(
        '--use_jina',
        action='store_true',
        help="Whether to use Jina API for document fetching."
    )
    # NOTE: default is the literal string 'None' (not the None object);
    # kept as-is because downstream code may compare against it.
    parser.add_argument(
        '--jina_api_key',
        type=str,
        default='None',
        help="Your Jina API Key to Fetch URL Content."
    )

    # --- Model configuration ---
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help="Path to the pre-trained model."
    )

    # --- Sampling parameters ---
    parser.add_argument(
        '--temperature',
        type=float,
        default=0.7,
        help="Sampling temperature."
    )
    parser.add_argument(
        '--top_p',
        type=float,
        default=0.8,
        help="Top-p sampling parameter."
    )
    parser.add_argument(
        '--top_k_sampling',
        type=int,
        default=20,
        help="Top-k sampling parameter."
    )
    parser.add_argument(
        '--repetition_penalty',
        type=float,
        default=None,
        help="Repetition penalty. If not set, defaults based on the model."
    )
    parser.add_argument(
        '--max_tokens',
        type=int,
        default=32768,
        help="Maximum number of tokens to generate. If not set, defaults based on the model and dataset."
    )

    # --- Bing API configuration ---
    parser.add_argument(
        '--bing_subscription_key',
        type=str,
        required=True,
        help="Bing Search API subscription key."
    )
    parser.add_argument(
        '--bing_endpoint',
        type=str,
        default="https://api.bing.microsoft.com/v7.0/search",
        help="Bing Search API endpoint."
    )

    # --- I/O paths and URL filtering ---
    parser.add_argument(
        '--cache_dir_base',
        type=str,
        required=True,
        help="cache path."
    )
    parser.add_argument(
        '--output_dir_base',
        type=str,
        required=True,
        help="output_dir"
    )
    parser.add_argument(
        '--is_exclude_urls',
        action="store_true",
        help="is_exclude_urls"
    )

    return parser.parse_args()
|
| 259 |
+
|
| 260 |
+
def main():
|
| 261 |
+
args = parse_args()
|
| 262 |
+
# print(f"args.use_jina: {args.use_jina}")
|
| 263 |
+
# Extract arguments
|
| 264 |
+
dataset_name = args.dataset_name
|
| 265 |
+
split = args.split
|
| 266 |
+
subset_num = args.subset_num
|
| 267 |
+
MAX_SEARCH_LIMIT = args.max_search_limit
|
| 268 |
+
MAX_TURN = args.max_turn
|
| 269 |
+
top_k = args.top_k
|
| 270 |
+
max_doc_len = args.max_doc_len
|
| 271 |
+
model_path = args.model_path
|
| 272 |
+
# model_doc_reason_path = args.model_doc_reason_path
|
| 273 |
+
temperature = args.temperature
|
| 274 |
+
top_p = args.top_p
|
| 275 |
+
top_k_sampling = args.top_k_sampling
|
| 276 |
+
repetition_penalty = args.repetition_penalty
|
| 277 |
+
max_tokens = args.max_tokens
|
| 278 |
+
bing_subscription_key = args.bing_subscription_key
|
| 279 |
+
bing_endpoint = args.bing_endpoint
|
| 280 |
+
use_jina = args.use_jina
|
| 281 |
+
jina_api_key = args.jina_api_key
|
| 282 |
+
cache_dir_base = args.cache_dir_base
|
| 283 |
+
output_dir_base = args.output_dir_base
|
| 284 |
+
is_exclude_urls = args.is_exclude_urls
|
| 285 |
+
# openai_api_base = args.openai_api_base
|
| 286 |
+
use_jina = False
|
| 287 |
+
print(f"use_jina: {use_jina}")
|
| 288 |
+
print(f"temperature: {temperature}")
|
| 289 |
+
print(f"CUDA_VISIBLE_DEVICES is set to: {os.environ['CUDA_VISIBLE_DEVICES']}")
|
| 290 |
+
|
| 291 |
+
# openai_api_key = "EMPTY"
|
| 292 |
+
# openai_api_base = openai_api_base
|
| 293 |
+
# client = OpenAI(
|
| 294 |
+
# api_key=openai_api_key,
|
| 295 |
+
# base_url=openai_api_base,
|
| 296 |
+
# )
|
| 297 |
+
|
| 298 |
+
# Adjust parameters based on dataset
|
| 299 |
+
if dataset_name in ['no_error_data_871', 'eval_old_500', 'gaia', 'frames', 'realqa', 'syn_en', 'syn_zh','musique_syn', 'eval', 'new', 'chinese_simpleqa', 'simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki', 'medmcqa', 'pubhealth']:
|
| 300 |
+
MAX_SEARCH_LIMIT = 5
|
| 301 |
+
if dataset_name in ['no_error_data_871', 'eval_old_500', 'gaia', 'frames', 'realqa', 'syn_en', 'syn_zh', 'musique_syn','eval', 'new', 'chinese_simpleqa', 'simpleqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 302 |
+
MAX_SEARCH_LIMIT = 10
|
| 303 |
+
MAX_TURN = 15
|
| 304 |
+
top_k = 10
|
| 305 |
+
max_doc_len = 3000
|
| 306 |
+
else:
|
| 307 |
+
MAX_SEARCH_LIMIT = 10
|
| 308 |
+
MAX_TURN = 15
|
| 309 |
+
|
| 310 |
+
if args.jina_api_key == 'None':
|
| 311 |
+
jina_api_key = None
|
| 312 |
+
|
| 313 |
+
# Set default repetition_penalty if not provided
|
| 314 |
+
if repetition_penalty is None:
|
| 315 |
+
repetition_penalty = 1.05 if 'qwq' in model_path.lower() else 1.0
|
| 316 |
+
|
| 317 |
+
# Data paths based on dataset
|
| 318 |
+
if split == "test": # 测试用的数据集地址
|
| 319 |
+
data_path = f"./data/test/{dataset_name}.json"
|
| 320 |
+
elif split == "gen":
|
| 321 |
+
data_path = f"./data/gen/{dataset_name}.json"
|
| 322 |
+
else: # 训练用的数据集地址
|
| 323 |
+
if dataset_name == 'livecode':
|
| 324 |
+
data_path = f'./data/LiveCodeBench/{split}.json'
|
| 325 |
+
elif dataset_name in ['math500', 'gpqa', 'aime', 'amc']:
|
| 326 |
+
data_path = f'./data/{dataset_name.upper()}/{split}.json'
|
| 327 |
+
else:
|
| 328 |
+
data_path = f'./data/QA_Datasets/{dataset_name}.json'
|
| 329 |
+
|
| 330 |
+
print('-----------------------')
|
| 331 |
+
print(f'Using {dataset_name} {split} set.')
|
| 332 |
+
print('-----------------------')
|
| 333 |
+
|
| 334 |
+
# ---------------------- Caching Mechanism ----------------------
|
| 335 |
+
# Define cache directories and file paths
|
| 336 |
+
# cache_dir = './cache'
|
| 337 |
+
model_name = model_path.split('/')[-1].replace('-instruct', '')
|
| 338 |
+
# cache_dir = f'./{cache_dir_base}_{dataset_name}_{model_name}'
|
| 339 |
+
cache_dir = cache_dir_base
|
| 340 |
+
search_cache_path = os.path.join(cache_dir, 'search_cache.json')
|
| 341 |
+
url_cache_path = os.path.join(cache_dir, 'url_cache.json')
|
| 342 |
+
|
| 343 |
+
# Ensure cache directory exists
|
| 344 |
+
os.makedirs(cache_dir, exist_ok=True)
|
| 345 |
+
|
| 346 |
+
# Load existing caches or initialize empty dictionaries
|
| 347 |
+
if os.path.exists(search_cache_path):
|
| 348 |
+
with open(search_cache_path, 'r', encoding='utf-8') as f:
|
| 349 |
+
search_cache = json.load(f)
|
| 350 |
+
else:
|
| 351 |
+
search_cache = {}
|
| 352 |
+
|
| 353 |
+
if os.path.exists(url_cache_path):
|
| 354 |
+
with open(url_cache_path, 'r', encoding='utf-8') as f:
|
| 355 |
+
url_cache = json.load(f)
|
| 356 |
+
else:
|
| 357 |
+
url_cache = {}
|
| 358 |
+
|
| 359 |
+
# Function to save caches
|
| 360 |
+
def save_caches():
    """Persist the search-result and URL-content caches to disk.

    Fix over the original: each cache is first written to a temporary
    sibling file and then moved into place with ``os.replace``, which is
    atomic on both POSIX and Windows.  The original wrote directly to the
    final path, so a crash or kill mid-write could leave a truncated,
    unparseable JSON file and silently destroy the cache.

    Reads the enclosing scope's ``search_cache_path`` / ``search_cache``
    and ``url_cache_path`` / ``url_cache``; returns ``None``.
    """
    for path, cache in ((search_cache_path, search_cache),
                        (url_cache_path, url_cache)):
        tmp_path = path + '.tmp'
        with open(tmp_path, 'w', encoding='utf-8') as f:
            json.dump(cache, f, ensure_ascii=False, indent=2)
        os.replace(tmp_path, path)  # atomic swap: readers never see a partial file
|
| 365 |
+
|
| 366 |
+
# ---------------------- Model Loading ----------------------
|
| 367 |
+
print(f"Loading tokenizer from {model_path}...")
|
| 368 |
+
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
| 369 |
+
if tokenizer.pad_token is None:
|
| 370 |
+
tokenizer.pad_token = tokenizer.eos_token
|
| 371 |
+
tokenizer.padding_side = 'left' # 主要是左填充
|
| 372 |
+
print("Tokenizer loaded successfully.")
|
| 373 |
+
|
| 374 |
+
# Define output directory based on model and dataset
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
# if 'qwq' in model_path.lower():
|
| 378 |
+
# if dataset_name in ['math500', 'gpqa', 'aime', 'amc', 'livecode']:
|
| 379 |
+
# output_dir = f'./{output_dir_base}/{dataset_name}.qwq.search_o1'
|
| 380 |
+
# if dataset_name == 'gpqa' and (MAX_SEARCH_LIMIT != 5 or top_k != 10):
|
| 381 |
+
# output_dir = f'./{output_dir_base}/runs.analysis/{dataset_name}.qwq.search_o1.{MAX_SEARCH_LIMIT}.{top_k}'
|
| 382 |
+
# else:
|
| 383 |
+
# output_dir = f'./{output_dir_base}/runs.qa/{dataset_name}.qwq.search_o1'
|
| 384 |
+
# else:
|
| 385 |
+
# model_short_name = model_path.split('/')[-1].lower().replace('-instruct', '')
|
| 386 |
+
# output_dir = f'./{output_dir_base}/runs.baselines/{dataset_name}.{model_short_name}.search_o1'
|
| 387 |
+
output_dir = output_dir_base
|
| 388 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 389 |
+
|
| 390 |
+
print(f"Loading model from {model_path}...")
|
| 391 |
+
print(f"device_count: {torch.cuda.device_count()}")
|
| 392 |
+
|
| 393 |
+
# Initialize the LLM
|
| 394 |
+
llm = LLM(
|
| 395 |
+
model=model_path,
|
| 396 |
+
tensor_parallel_size=torch.cuda.device_count(),
|
| 397 |
+
gpu_memory_utilization=0.95,
|
| 398 |
+
|
| 399 |
+
)
|
| 400 |
+
print("Model loaded successfully.")
|
| 401 |
+
|
| 402 |
+
# # ----------------------Loading model to reason in document ----------------------
|
| 403 |
+
|
| 404 |
+
# print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
|
| 405 |
+
# tokenizer_doc_reason = AutoTokenizer.from_pretrained(model_doc_reason_path, trust_remote_code=True)
|
| 406 |
+
# if tokenizer_doc_reason.pad_token is None:
|
| 407 |
+
# tokenizer_doc_reason.pad_token = tokenizer_doc_reason.eos_token
|
| 408 |
+
# tokenizer_doc_reason.padding_side = 'left' # 主要是左填充
|
| 409 |
+
# print("tokenizer_doc_reason loaded successfully.")
|
| 410 |
+
|
| 411 |
+
# print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
|
| 412 |
+
|
| 413 |
+
# # Initialize the LLM
|
| 414 |
+
# # torch.cuda.set_device(6,7)
|
| 415 |
+
|
| 416 |
+
# llm_doc_reason = LLM(
|
| 417 |
+
# model=model_doc_reason_path,
|
| 418 |
+
# tensor_parallel_size=2,
|
| 419 |
+
# gpu_memory_utilization=0.95,
|
| 420 |
+
|
| 421 |
+
# )
|
| 422 |
+
# print("Model_doc_reason loaded successfully.")
|
| 423 |
+
|
| 424 |
+
# ---------------------- Data Loading ----------------------
|
| 425 |
+
print(f"Loading data from {data_path}...")
|
| 426 |
+
with open(data_path, 'r', encoding='utf-8') as json_file:
|
| 427 |
+
filtered_data = json.load(json_file)
|
| 428 |
+
print(f"Data loaded successfully. Total examples: {len(filtered_data)}")
|
| 429 |
+
|
| 430 |
+
# ---------------------- Batch Generation Function ----------------------
|
| 431 |
+
def generate_webpage_to_reasonchain_batch( # The model reasons over webpage content; answers are then extracted from its replies
    original_questions: List[str],
    prev_reasonings: List[str],
    search_queries: List[str],
    documents: List[str],
    dataset_name: str,
    batch_output_records: List[Dict], # New parameter to collect outputs
    max_tokens: int = 32768,
    coherent: bool = False,
) -> List[str]:
    """Batch-distill fetched webpages into reasoning snippets.

    For each (previous reasoning, search query, document) triple, builds an
    instruction prompt via ``get_webpage_to_reasonchain_instruction_1``,
    generates a completion with the enclosing scope's ``llm``, and extracts
    the useful information with ``extract_answer(mode='infogen')``.

    Parameters:
        original_questions: original user questions (not referenced in this body).
        prev_reasonings: truncated prior reasoning chain, one per sequence.
        search_queries: the search query each sequence just issued.
        documents: formatted webpage text, one concatenated string per sequence.
        dataset_name: dataset identifier (not referenced in this body).
        batch_output_records: mutated in place — one record per prompt holding
            the prompt, raw model output, and extracted info, for later saving.
        max_tokens: generation budget; overridden to 8192 for Qwen2.5 models.
        coherent: unused flag (not referenced in this body).

    Returns:
        The extracted info strings, aligned index-for-index with the inputs.
    """

    # Qwen2.5 models get a smaller generation budget.
    if "Qwen2.5" in model_path:
        max_tokens = 8192

    # Truncate every document to at most 20000 tokens, then decode back to text
    # so the prompts below are built from the truncated form.
    encode_docs = tokenizer(documents, truncation=True, max_length=20000, add_special_tokens=False)["input_ids"]
    documents = tokenizer.batch_decode(encode_docs)

    # Record each document's token length (for the diagnostic print below).
    doc_lengths = [len(doc) for doc in encode_docs]

    # # Print each document's length
    # for i, length in enumerate(doc_lengths):
    #     print(f"Document {i + 1}: {length} tokens")

    # The per-document length list is available as doc_lengths if needed.
    print(f"for {model_path}, set max_tokens={max_tokens} for doc gen, truncate documnets. ")
    print("All document lengths:", doc_lengths)

    user_prompts = [ # Build user prompts from the prior reasoning, the search query, and the fetched docs
        get_webpage_to_reasonchain_instruction_1(r, sq, doc)
        for r, sq, doc in zip(prev_reasonings, search_queries, documents)
    ]

    prompts = [{"role": "user", "content": up} for up in user_prompts]
    prompts = [tokenizer.apply_chat_template([p], tokenize=False, add_generation_prompt=True) for p in prompts]

    output = llm.generate( # Generate model replies
        prompts,
        # sampling_params=SamplingParams(
        #     max_tokens=max_tokens,
        #     temperature=temperature,
        #     top_p=0.8,
        #     top_k=20,
        #     repetition_penalty=1.05,
        # )
        # NOTE(review): sampling settings are hard-coded here and ignore the
        # CLI temperature/top_p/top_k_sampling arguments — confirm intended.
        sampling_params=SamplingParams(
            max_tokens=max_tokens,
            temperature=0.6,
            top_p=0.95,
            top_k=40,
        )
    )

    raw_outputs = [out.outputs[0].text for out in output]
    extracted_infos = [extract_answer(raw, mode='infogen') for raw in raw_outputs] # Extract the reasoning the model produced from the webpages

    # Keep a full record of each prompt / raw output / extraction for debugging.
    for i, (p, r, e) in enumerate(zip(prompts, raw_outputs, extracted_infos)):
        batch_output_records.append({
            'prompt': p,
            'raw_output': r,
            'extracted_info': e
        })

    return extracted_infos
|
| 495 |
+
|
| 496 |
+
# ---------------------- Preparation of Input Prompts ----------------------
|
| 497 |
+
|
| 498 |
+
print(get_task_instruction_openqa("test", model_name='qwq'))
|
| 499 |
+
input_list = []
|
| 500 |
+
for item in filtered_data: # 生成prompts
|
| 501 |
+
question = item['Question']
|
| 502 |
+
|
| 503 |
+
if dataset_name in ['no_error_data_871', 'eval_old_500', 'gaia', 'frames', 'realqa', 'syn_en', 'syn_zh', 'musique_syn', 'eval', 'new', 'chinese_simpleqa', 'simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 504 |
+
if dataset_name in ['nq', 'triviaqa']:
|
| 505 |
+
instruction = get_singleqa_search_o1_instruction_1(MAX_SEARCH_LIMIT)
|
| 506 |
+
elif dataset_name in ['no_error_data_871', 'eval_old_500', 'gaia', 'frames', 'realqa', 'syn_en', 'syn_zh', 'musique_syn', 'eval', 'new', 'chinese_simpleqa', 'simpleqa','hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 507 |
+
instruction = get_multiqa_search_o1_instruction_4(MAX_SEARCH_LIMIT)
|
| 508 |
+
# if 'qwq' in model_path.lower():
|
| 509 |
+
# user_prompt = get_task_instruction_openqa(question, model_name='qwq')
|
| 510 |
+
# else:
|
| 511 |
+
# user_prompt = get_task_instruction_openqa(question)
|
| 512 |
+
user_prompt = get_task_instruction_openqa(question, model_name='qwq')
|
| 513 |
+
|
| 514 |
+
elif dataset_name in ['math500', 'aime', 'amc']:
|
| 515 |
+
instruction = get_math_search_o1_instruction_1(MAX_SEARCH_LIMIT)
|
| 516 |
+
# if 'qwq' in model_path.lower():
|
| 517 |
+
# user_prompt = get_task_instruction_math(question, model_name='qwq')
|
| 518 |
+
# else:
|
| 519 |
+
# user_prompt = get_task_instruction_math(question)
|
| 520 |
+
user_prompt = get_task_instruction_math(question, model_name='qwq')
|
| 521 |
+
|
| 522 |
+
elif dataset_name == 'gpqa':
|
| 523 |
+
instruction = get_gpqa_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 524 |
+
# if 'qwq' in model_path.lower():
|
| 525 |
+
# user_prompt = get_task_instruction_multi_choice(question, model_name='qwq')
|
| 526 |
+
# elif 'llama' in model_path.lower():
|
| 527 |
+
# user_prompt = get_task_instruction_multi_choice(question, model_name='llama')
|
| 528 |
+
# else:
|
| 529 |
+
# user_prompt = get_task_instruction_multi_choice(question)
|
| 530 |
+
user_prompt = get_task_instruction_multi_choice(question, model_name='qwq')
|
| 531 |
+
|
| 532 |
+
elif dataset_name == 'livecode':
|
| 533 |
+
instruction = get_code_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 534 |
+
question_title = item.get('question_title', '')
|
| 535 |
+
# if 'qwq' in model_path.lower():
|
| 536 |
+
# user_prompt = get_task_instruction_code(question, question_title=question_title, model_name='qwq')
|
| 537 |
+
# else:
|
| 538 |
+
# user_prompt = get_task_instruction_code(question)
|
| 539 |
+
user_prompt = get_task_instruction_code(question, question_title=question_title, model_name='qwq')
|
| 540 |
+
else:
|
| 541 |
+
# user_prompt = "" # Default to empty if dataset not matched
|
| 542 |
+
instruction = get_multiqa_search_o1_instruction_4(MAX_SEARCH_LIMIT)
|
| 543 |
+
# if 'qwq' in model_path.lower():
|
| 544 |
+
# user_prompt = get_task_instruction_openqa(question, model_name='qwq')
|
| 545 |
+
# else:
|
| 546 |
+
# user_prompt = get_task_instruction_openqa(question)
|
| 547 |
+
user_prompt = get_task_instruction_openqa(question, model_name='qwq')
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
prompt = [{"role": "user", "content": instruction + user_prompt}] # instruction是告诉模型怎么进行搜索,user_prompt是用户具体问题
|
| 551 |
+
prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
|
| 552 |
+
input_list.append(prompt)
|
| 553 |
+
|
| 554 |
+
if subset_num != -1:
|
| 555 |
+
input_list = input_list[:subset_num]
|
| 556 |
+
filtered_data = filtered_data[:subset_num]
|
| 557 |
+
|
| 558 |
+
# Initialize active sequences
|
| 559 |
+
active_sequences = [{ # 记录每个问题的搜索历史
|
| 560 |
+
'item': item,
|
| 561 |
+
'prompt': prompt,
|
| 562 |
+
'output': '',
|
| 563 |
+
'finished': False, # 一开始均为未完成
|
| 564 |
+
'history': [],
|
| 565 |
+
'search_count': 0,
|
| 566 |
+
'executed_search_queries': set(),
|
| 567 |
+
'all_info': [],
|
| 568 |
+
} for item, prompt in zip(filtered_data, input_list)]
|
| 569 |
+
|
| 570 |
+
# ---------------------- Set Max Tokens ----------------------
|
| 571 |
+
# if 'qwq' in model_path.lower():
|
| 572 |
+
# if dataset_name in ['aime', 'amc', 'livecode']:
|
| 573 |
+
# max_tokens = 32768
|
| 574 |
+
# else:
|
| 575 |
+
# max_tokens = 20480
|
| 576 |
+
# else:
|
| 577 |
+
# max_tokens = 8192
|
| 578 |
+
# max_tokens = 16384
|
| 579 |
+
if dataset_name in ['aime', 'amc', 'livecode']:
|
| 580 |
+
max_tokens = 32768
|
| 581 |
+
else:
|
| 582 |
+
max_tokens = 20480
|
| 583 |
+
# ---------------------- Generation Function ----------------------
|
| 584 |
+
def run_generation(sequences: List[Dict], max_tokens: int) -> List:
    """Run one vLLM generation pass over the prompts of the given sequences.

    Generation stops either at END_SEARCH_QUERY (kept in the output so the
    caller can detect a pending search) or at the tokenizer's EOS token.
    Sampling settings are fixed (temperature=0.6, top_p=0.95, top_k=40).

    Returns the vLLM output objects, aligned with ``sequences``.
    """
    batch_prompts = [entry['prompt'] for entry in sequences]
    decode_config = SamplingParams(
        max_tokens=max_tokens,
        temperature=0.6,
        top_p=0.95,
        top_k=40,
        stop=[END_SEARCH_QUERY, tokenizer.eos_token],
        include_stop_str_in_output=True,
    )
    # The model continues each prompt; outputs come back in input order.
    output_list = llm.generate(batch_prompts, sampling_params=decode_config)
    print(f"run_generation completed {len(output_list)}")
    return output_list
|
| 606 |
+
|
| 607 |
+
# Function to extract text between two tags 提取位于 start_tag 和 end_tag 之间的内容
|
| 608 |
+
def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
    """Return the stripped content of the LAST ``start_tag``...``end_tag`` span.

    Matching is non-greedy and spans newlines (DOTALL). Returns ``None``
    when no complete tag pair is present in ``text``.
    """
    tag_pair = re.compile(
        re.escape(start_tag) + r"(.*?)" + re.escape(end_tag), re.DOTALL
    )
    last_hit = None
    for hit in tag_pair.finditer(text):
        last_hit = hit
    if last_hit is None:
        return None
    return last_hit.group(1).strip()
|
| 614 |
+
|
| 615 |
+
def replace_recent_steps(origin_str, replace_str): # 使用replace_str更新origin_str
|
| 616 |
+
"""
|
| 617 |
+
Replaces specific steps in the original reasoning steps with new steps.
|
| 618 |
+
If a replacement step contains "DELETE THIS STEP", that step is removed.
|
| 619 |
+
|
| 620 |
+
Parameters:
|
| 621 |
+
- origin_str (str): The original reasoning steps.
|
| 622 |
+
- replace_str (str): The steps to replace or delete.
|
| 623 |
+
|
| 624 |
+
Returns:
|
| 625 |
+
- str: The updated reasoning steps after applying replacements.
|
| 626 |
+
这个函数的主要功能是替换给定的推理步骤(reasoning steps),
|
| 627 |
+
并根据传入的 replace_str 进行更新。
|
| 628 |
+
如果新的步骤包含 "DELETE THIS STEP",则删除该步骤
|
| 629 |
+
"""
|
| 630 |
+
|
| 631 |
+
def parse_steps(text):
|
| 632 |
+
"""
|
| 633 |
+
Parses the reasoning steps from a given text.
|
| 634 |
+
|
| 635 |
+
Parameters:
|
| 636 |
+
- text (str): The text containing reasoning steps.
|
| 637 |
+
|
| 638 |
+
Returns:
|
| 639 |
+
- dict: A dictionary mapping step numbers to their content.
|
| 640 |
+
"""
|
| 641 |
+
step_pattern = re.compile(r"Step\s+(\d+):\s*") # 这个模式会匹配 "Step" 后面跟一个或多个空格,然后是一个数字(步骤编号),最后是冒号
|
| 642 |
+
steps = {}
|
| 643 |
+
current_step_num = None
|
| 644 |
+
current_content = []
|
| 645 |
+
|
| 646 |
+
for line in text.splitlines(): # 将输入的文本按行分割,并逐行遍历。每一行会被检查是否包含一个步骤
|
| 647 |
+
step_match = step_pattern.match(line)
|
| 648 |
+
if step_match: # 匹配到一个新的步骤
|
| 649 |
+
# If there's an ongoing step, save its content,如果当前的步骤不为空,将其为上一个步骤,先将上一个步骤的内容(存在current_content中)保存,然后再更新current_step_num和current_content
|
| 650 |
+
if current_step_num is not None:
|
| 651 |
+
steps[current_step_num] = "\n".join(current_content).strip()
|
| 652 |
+
current_step_num = int(step_match.group(1))
|
| 653 |
+
content = line[step_match.end():].strip()
|
| 654 |
+
current_content = [content] if content else []
|
| 655 |
+
else:
|
| 656 |
+
if current_step_num is not None:
|
| 657 |
+
current_content.append(line)
|
| 658 |
+
|
| 659 |
+
# Save the last step if any
|
| 660 |
+
if current_step_num is not None: # 保存最后一个步骤
|
| 661 |
+
steps[current_step_num] = "\n".join(current_content).strip()
|
| 662 |
+
|
| 663 |
+
return steps
|
| 664 |
+
|
| 665 |
+
# Parse the original and replacement steps
|
| 666 |
+
origin_steps = parse_steps(origin_str) # 解析原始的推理步骤
|
| 667 |
+
replace_steps = parse_steps(replace_str) # 解析要替换的推理步骤
|
| 668 |
+
|
| 669 |
+
# Apply replacements
|
| 670 |
+
for step_num, content in replace_steps.items(): # 遍历要替换的步骤
|
| 671 |
+
if "DELETE THIS STEP" in content:
|
| 672 |
+
# Remove the step if it exists
|
| 673 |
+
if step_num in origin_steps: # 如果要删除的步骤在原始的推理步骤中存在,则删除该步骤
|
| 674 |
+
del origin_steps[step_num]
|
| 675 |
+
else: # 如果要替换的步骤不是要删除的步骤,则替换该步骤
|
| 676 |
+
# Replace or add the step
|
| 677 |
+
origin_steps[step_num] = content
|
| 678 |
+
|
| 679 |
+
# Sort the steps by step number
|
| 680 |
+
sorted_steps = sorted(origin_steps.items()) # 按照步骤编号对步骤进行排序
|
| 681 |
+
|
| 682 |
+
# Reconstruct the reasoning steps as a single string
|
| 683 |
+
new_reasoning_steps = "\n\n".join([f"{content}" for num, content in sorted_steps]) # 根据排序后的步骤构建新的推理步骤字符串,步骤之间以两个换行符分隔
|
| 684 |
+
|
| 685 |
+
return new_reasoning_steps
|
| 686 |
+
|
| 687 |
+
# ---------------------- Initialize Collection Structure ----------------------
|
| 688 |
+
# Initialize a list to collect batch outputs
|
| 689 |
+
batch_output_records = []
|
| 690 |
+
|
| 691 |
+
start_time = time.time()
|
| 692 |
+
turn = 0
|
| 693 |
+
|
| 694 |
+
# 流程
|
| 695 |
+
# 首先根据prompt让模型生成回复
|
| 696 |
+
# 从模型的回复中提取搜索查询
|
| 697 |
+
# 如果有(回复要以END_SEARCH_QUERY结尾)
|
| 698 |
+
# 根据搜索查询,从互联网上提取相关信息
|
| 699 |
+
# 处理查询的信息
|
| 700 |
+
# 让模型基于之前的步骤,检索query和查询到的信息生成新的推理,得到search result
|
| 701 |
+
# 然后回到第一步(这里模型就会根据前面的search result,再次生成新的回复
|
| 702 |
+
# 如果没有查询则该条问题结束
|
| 703 |
+
|
| 704 |
+
# Main loop until all sequences are finished or maximum turns reached
|
| 705 |
+
while True:
|
| 706 |
+
# Identify sequences that need generation
|
| 707 |
+
sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']] # 筛选出需要生成的新内容的序列,active_sequences 是一个包含所有活跃序列的列表
|
| 708 |
+
|
| 709 |
+
if sequences_needing_generation:
|
| 710 |
+
turn += 1
|
| 711 |
+
print(f'\n-------------- Turn {turn} --------------')
|
| 712 |
+
print(f"We have {len(sequences_needing_generation)} sequences needing generation...")
|
| 713 |
+
outputs = run_generation(sequences_needing_generation, max_tokens) # 根据prompt
|
| 714 |
+
print("Generation completed, processing outputs...")
|
| 715 |
+
|
| 716 |
+
# Initialize batch variables
|
| 717 |
+
batch_relevant_info = []
|
| 718 |
+
batch_original_questions = []
|
| 719 |
+
batch_prev_reasonings = []
|
| 720 |
+
batch_search_queries = []
|
| 721 |
+
batch_documents = []
|
| 722 |
+
batch_sequences = []
|
| 723 |
+
|
| 724 |
+
# Collect URLs to fetch across all sequences
|
| 725 |
+
all_urls_to_fetch = set() # 初始化一个集合 all_urls_to_fetch 用来收集所有需要获取的 URL
|
| 726 |
+
url_snippets = {}
|
| 727 |
+
url_sequence_map = {} # Map URL to list of sequences needing it
|
| 728 |
+
|
| 729 |
+
start_search_time = time.time()
|
| 730 |
+
# Process each sequence and collect URLs
|
| 731 |
+
for seq, out in zip(sequences_needing_generation, outputs): # 遍历需要生成新内容的序列,并生成新内容,同时收集需要获取的 URL
|
| 732 |
+
text = out.outputs[0].text # 将生成的文本添加到序列的历史记录、提示和输出中
|
| 733 |
+
seq['history'].append(text)
|
| 734 |
+
# Append generated text to prompt and output
|
| 735 |
+
seq['prompt'] += text
|
| 736 |
+
seq['output'] += text
|
| 737 |
+
seq['all_info'].append({f"turn_{turn}_reason": text})
|
| 738 |
+
# Extract search query
|
| 739 |
+
search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY) # 提取搜索查询
|
| 740 |
+
|
| 741 |
+
# If a search query is present and needs to be executed
|
| 742 |
+
if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
|
| 743 |
+
if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
|
| 744 |
+
# Execute search, use cache if available
|
| 745 |
+
if search_query in search_cache:
|
| 746 |
+
results = search_cache[search_query] # 如果搜索查询结果在缓存中存在,则从缓存中获取结果
|
| 747 |
+
print(f"Using cached search results for query: \"{search_query}\"")
|
| 748 |
+
else:
|
| 749 |
+
try:
|
| 750 |
+
if is_exclude_urls and "urls" in seq["item"]["metadata"]:
|
| 751 |
+
print(f"is_exclude_urls: {is_exclude_urls}")
|
| 752 |
+
exclude_urls = seq["item"]["metadata"]["urls"]
|
| 753 |
+
else:
|
| 754 |
+
exclude_urls = []
|
| 755 |
+
|
| 756 |
+
print(f"Execute and cache search for query: \"{search_query}\"")
|
| 757 |
+
results = bing_web_search(search_query, bing_subscription_key, bing_endpoint, market='en-US', language='en', exclude_urls=exclude_urls) # 执行搜索
|
| 758 |
+
search_cache[search_query] = results # 将搜索结果添加到缓存中
|
| 759 |
+
print(f"Executed and cached search for query: \"{search_query}\"")
|
| 760 |
+
except Exception as e:
|
| 761 |
+
print(f"Error during search query '{search_query}': {e}")
|
| 762 |
+
search_cache[search_query] = {}
|
| 763 |
+
results = {}
|
| 764 |
+
|
| 765 |
+
# Extract relevant information from Bing search results
|
| 766 |
+
relevant_info = extract_relevant_info(results)[:top_k] # 从搜索结果中提取出最相关的信息
|
| 767 |
+
seq['relevant_info'] = relevant_info
|
| 768 |
+
|
| 769 |
+
# Extract URLs and snippets
|
| 770 |
+
urls_to_fetch = [it['url'] for it in relevant_info] # 从搜索结果中提取出所有 URL
|
| 771 |
+
snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info} # 创建一个字典 snippets,将 URL 映射到它们对应的片段(snippet)。如果 snippet 字段存在,则把它加入字典
|
| 772 |
+
|
| 773 |
+
# Filter URLs that are not cached
|
| 774 |
+
urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache] # 筛选出所有没有被缓存的 UR
|
| 775 |
+
cached_urls = [u for u in urls_to_fetch if u in url_cache] # 选出已经缓存的 URL(即在 url_cache 中存在的 URL)。这些 URL 已经被处理过,不需要再次请求
|
| 776 |
+
|
| 777 |
+
# Store info for all_urls_to_fetch and url_snippets
|
| 778 |
+
for url in urls_to_fetch_filtered:
|
| 779 |
+
all_urls_to_fetch.add(url)
|
| 780 |
+
url_snippets[url] = snippets.get(url, "") # 将每个 URL 对应的片段存储到 url_snippets 字典中
|
| 781 |
+
|
| 782 |
+
all_reasoning_steps = seq['output']
|
| 783 |
+
all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n") # 将连续的空行(\n\n)替换为单个换行符(\n),然后按换行符拆分文本,得到每个推理步骤单独的一行
|
| 784 |
+
|
| 785 |
+
truncated_prev_reasoning = ""
|
| 786 |
+
for i, step in enumerate(all_reasoning_steps):
|
| 787 |
+
truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n" # 遍历 all_reasoning_steps 中的每个步骤,并将每个步骤编号和步骤内容格式化后,添加到 truncated_prev_reasoning 字符串中。这样生成一个包含步骤编号和内容的字符串
|
| 788 |
+
|
| 789 |
+
prev_steps = truncated_prev_reasoning.split('\n\n') # 将推理步骤字符串 truncated_prev_reasoning 按照每两个换行符拆分成多个步骤
|
| 790 |
+
if len(prev_steps) <= 5: # 如果步骤的数量不超过 5,直接保留所有步骤
|
| 791 |
+
truncated_prev_reasoning = '\n\n'.join(prev_steps)
|
| 792 |
+
else:
|
| 793 |
+
truncated_prev_reasoning = ''
|
| 794 |
+
for i, step in enumerate(prev_steps): # 如果步骤超过 5 个,进行截断。保留第一个步骤、最后四个步骤以及包含搜索查询或搜索结果的步骤。如果中间的步骤不重要,则用 ... 来表示省略
|
| 795 |
+
if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
|
| 796 |
+
truncated_prev_reasoning += step + '\n\n'
|
| 797 |
+
else: # 如果步骤超过 5 个,进行截断。保留第一个步骤、最后四个步骤以及包含搜索查询或搜索结果的步骤。如果中间的步骤不重要,则用 ... 来表示省略
|
| 798 |
+
if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
|
| 799 |
+
truncated_prev_reasoning += '...\n\n'
|
| 800 |
+
truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
|
| 801 |
+
|
| 802 |
+
# Collect parameters for batch processing
|
| 803 |
+
batch_relevant_info.append(relevant_info) # 搜索出来的信息
|
| 804 |
+
batch_original_questions.append(seq['item']['Question']) # 原始问题
|
| 805 |
+
batch_prev_reasonings.append(truncated_prev_reasoning) # 之前的推理步骤
|
| 806 |
+
batch_search_queries.append(search_query) # 搜索查询
|
| 807 |
+
batch_sequences.append(seq)
|
| 808 |
+
|
| 809 |
+
# Update search count and executed queries
|
| 810 |
+
seq['search_count'] += 1 # 更新搜索计数
|
| 811 |
+
seq['executed_search_queries'].add(search_query) # 将已执行的搜索查询添加到集合中
|
| 812 |
+
|
| 813 |
+
elif seq['search_count'] >= MAX_SEARCH_LIMIT: # 如果搜索次数达到或超过该限制,则返回一条消息,通知该查询无法再进行
|
| 814 |
+
limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
|
| 815 |
+
seq['prompt'] += limit_message
|
| 816 |
+
seq['output'] += limit_message
|
| 817 |
+
seq['history'].append(limit_message)
|
| 818 |
+
seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
|
| 819 |
+
print(f"Search limit reached for query: \"{search_query}\"")
|
| 820 |
+
|
| 821 |
+
elif search_query in seq['executed_search_queries']: # 如果当前查询已经执行过,则返回一个消息,提示用户查询已重复,并引导其查看之前的结果
|
| 822 |
+
limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
|
| 823 |
+
seq['prompt'] += limit_message
|
| 824 |
+
seq['output'] += limit_message
|
| 825 |
+
seq['history'].append(limit_message)
|
| 826 |
+
seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
|
| 827 |
+
print(f"Repeated search for query: \"{search_query}\"")
|
| 828 |
+
|
| 829 |
+
|
| 830 |
+
else: # 如果当前序列没有需要执行的搜索查询,则将该序列标记为完成,并打印提示信息
|
| 831 |
+
# If no search query needs to be executed, mark the sequence as finished
|
| 832 |
+
seq['finished'] = True
|
| 833 |
+
print("Sequence marked as complete.")
|
| 834 |
+
|
| 835 |
+
print(f"get search time taken: {time.time() - start_search_time}")
|
| 836 |
+
print(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
|
| 837 |
+
print(f"all_urls_to_fetch: {all_urls_to_fetch}")
|
| 838 |
+
# Batch fetch all URLs at once to optimize speed
|
| 839 |
+
|
| 840 |
+
if all_urls_to_fetch:
|
| 841 |
+
print(f"Fetching {len(all_urls_to_fetch)} URLs...")
|
| 842 |
+
try:
|
| 843 |
+
fetched_contents = fetch_page_content( # 一次性获取所有 URL 的中搜索出来的内容
|
| 844 |
+
list(all_urls_to_fetch),
|
| 845 |
+
use_jina=use_jina,
|
| 846 |
+
jina_api_key=jina_api_key,
|
| 847 |
+
# snippets=url_snippets # Do not pass snippets when updating url_cache directly
|
| 848 |
+
)
|
| 849 |
+
print(f"Fetched {len(fetched_contents)} URLs successfully.")
|
| 850 |
+
except Exception as e:
|
| 851 |
+
print(f"Error during batch URL fetching: {e}")
|
| 852 |
+
fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
|
| 853 |
+
# Update cache with fetched contents
|
| 854 |
+
for url, content in fetched_contents.items(): # 将获取的内容添加到 url_cache 中
|
| 855 |
+
url_cache[url] = content
|
| 856 |
+
|
| 857 |
+
# After fetching, prepare formatted documents for batch processing
|
| 858 |
+
for relevant_info in batch_relevant_info:
|
| 859 |
+
formatted_documents = "" # 初始化一个空字符串 formatted_documents,用于拼接本次要处理的所有网页信息。后面会将其添加到 batch_documents 列表中
|
| 860 |
+
for i, doc_info in enumerate(relevant_info):
|
| 861 |
+
url = doc_info['url']
|
| 862 |
+
raw_context = url_cache.get(url, "") # 获取 url 对应的内容
|
| 863 |
+
doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
|
| 864 |
+
success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
|
| 865 |
+
if success:
|
| 866 |
+
print("extract_snippet_with_context")
|
| 867 |
+
context = filtered_context
|
| 868 |
+
else: # 否则就取 raw_context 的前 max_doc_len * 2 个字符,作为一个有限的片段,避免过长导致后续处理负担
|
| 869 |
+
print(f"use raw_context, {len(raw_context)}")
|
| 870 |
+
context = raw_context[:max_doc_len*2]
|
| 871 |
+
|
| 872 |
+
doc_info['context'] = context
|
| 873 |
+
formatted_documents += f"**Web Page {i + 1}:**\n"
|
| 874 |
+
formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
|
| 875 |
+
print(f'formatted_documents: {len(formatted_documents)}')
|
| 876 |
+
batch_documents.append(formatted_documents) # 将本组搜索结果的所有信息(拼接成的字符串 formatted_documents)添加到 batch_documents 列表中
|
| 877 |
+
|
| 878 |
+
# After fetching, prepare for batch processing if there are any
|
| 879 |
+
if batch_sequences:
|
| 880 |
+
print(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
|
| 881 |
+
webpage_analyses = generate_webpage_to_reasonchain_batch( # 根据前面处理好的结果,生成新的推理
|
| 882 |
+
original_questions=batch_original_questions,
|
| 883 |
+
prev_reasonings=batch_prev_reasonings,
|
| 884 |
+
search_queries=batch_search_queries,
|
| 885 |
+
documents=batch_documents,
|
| 886 |
+
dataset_name=dataset_name,
|
| 887 |
+
batch_output_records=batch_output_records, # Pass the collection list
|
| 888 |
+
max_tokens=max_tokens,
|
| 889 |
+
)
|
| 890 |
+
print("Batch generation completed, assigning outputs to sequences...")
|
| 891 |
+
|
| 892 |
+
for seq, analysis,doc in zip(batch_sequences, webpage_analyses, batch_documents): # 遍历批处理返回的 webpage_analyses,将处理结果与相应的序列 seq 进行一一对应
|
| 893 |
+
if isinstance(analysis, str): # 判断 analysis 是否是纯字符串。如果是字符串,说明直接可以追加到序列的文本中
|
| 894 |
+
append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n" # 封装处理结果,添加到序列的历史记录、提示和输出中
|
| 895 |
+
seq['prompt'] += append_text
|
| 896 |
+
seq['output'] += append_text
|
| 897 |
+
seq['history'].append(append_text) # 存的是每一次的webpage_analyses
|
| 898 |
+
seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
|
| 899 |
+
else: # 如果 analysis 不是纯字符串,那么可能是一种特殊的结构,比如表示需要替换推理步骤的 dict 或其他格式
|
| 900 |
+
append_text = replace_recent_steps(seq['output'], analysis)
|
| 901 |
+
seq['prompt'] += append_text
|
| 902 |
+
seq['output'] += append_text
|
| 903 |
+
seq['history'].append(append_text)
|
| 904 |
+
seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
|
| 905 |
+
|
| 906 |
+
# Check if all sequences are finished
|
| 907 |
+
# 保存active_sequences
|
| 908 |
+
active_sequences_part = [{ # 记录每个问题的搜索历史
|
| 909 |
+
'item': ele["item"],
|
| 910 |
+
'prompt': ele['prompt'],
|
| 911 |
+
'output': ele["output"],
|
| 912 |
+
'finished': ele["finished"], # 一开始均为未完成
|
| 913 |
+
'history':ele["history"],
|
| 914 |
+
'search_count': ele["search_count"],
|
| 915 |
+
'all_info': ele['all_info']
|
| 916 |
+
} for ele in active_sequences]
|
| 917 |
+
with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
|
| 918 |
+
json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
|
| 919 |
+
unfinished = [seq for seq in active_sequences if not seq['finished']] # 是否结束是基于模型是否生成了新的搜索
|
| 920 |
+
if not unfinished:
|
| 921 |
+
break
|
| 922 |
+
else:
|
| 923 |
+
if turn >= MAX_TURN:
|
| 924 |
+
print(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
|
| 925 |
+
break
|
| 926 |
+
|
| 927 |
+
total_time = time.time() - start_time
|
| 928 |
+
print(f"Total time taken: {total_time} seconds")
|
| 929 |
+
|
| 930 |
+
# ---------------------- Save Batch Output Records to JSON File ----------------------
|
| 931 |
+
# Define output JSON file path
|
| 932 |
+
t = time.localtime()
|
| 933 |
+
batch_output_file = os.path.join(output_dir, f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')
|
| 934 |
+
|
| 935 |
+
# Save batch_output_records to JSON file
|
| 936 |
+
with open(batch_output_file, 'w', encoding='utf-8') as f: # 这里存的是webpage推理时的输入和输出和提取后的信息
|
| 937 |
+
json.dump(batch_output_records, f, ensure_ascii=False, indent=2)
|
| 938 |
+
|
| 939 |
+
print(f"Batch outputs saved to {batch_output_file}")
|
| 940 |
+
|
| 941 |
+
# Prepare output list for evaluation
|
| 942 |
+
output_list = [seq['output'] for seq in active_sequences]
|
| 943 |
+
|
| 944 |
+
# Run evaluation
|
| 945 |
+
if dataset_name in ['no_error_data_871', 'eval_old_500', "eval", "musique_syn", "gaia", "realqa"]:
|
| 946 |
+
run_evaluation_for_eval(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)
|
| 947 |
+
else:
|
| 948 |
+
run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)
|
| 949 |
+
|
| 950 |
+
# 评测has answer信息
|
| 951 |
+
turn_files = os.listdir(output_dir)
|
| 952 |
+
turn_files = [file for file in turn_files if file.startswith("turn_")]
|
| 953 |
+
max_turn_file = max(turn_files, key=lambda x: int(re.search(r'turn_(\d+)', x).group(1)))
|
| 954 |
+
|
| 955 |
+
max_turn_file_path = os.path.join(output_dir, max_turn_file)
|
| 956 |
+
print(f"max_turn_file_path: {max_turn_file_path}")
|
| 957 |
+
add_eval(model_path, max_turn_file_path)
|
| 958 |
+
|
| 959 |
+
# ---------------------- Update Search and URL Cache ----------------------
|
| 960 |
+
print('Updating Search and URL Cache...')
|
| 961 |
+
# Load existing caches or initialize empty dictionaries
|
| 962 |
+
if os.path.exists(search_cache_path):
|
| 963 |
+
with open(search_cache_path, 'r', encoding='utf-8') as f:
|
| 964 |
+
search_cache_new = json.load(f)
|
| 965 |
+
else:
|
| 966 |
+
search_cache_new = {}
|
| 967 |
+
|
| 968 |
+
if os.path.exists(url_cache_path):
|
| 969 |
+
with open(url_cache_path, 'r', encoding='utf-8') as f:
|
| 970 |
+
url_cache_new = json.load(f)
|
| 971 |
+
else:
|
| 972 |
+
url_cache_new = {}
|
| 973 |
+
|
| 974 |
+
search_cache.update(search_cache_new)
|
| 975 |
+
url_cache.update(url_cache_new)
|
| 976 |
+
|
| 977 |
+
save_caches()
|
| 978 |
+
|
| 979 |
+
print("Process completed.")
|
| 980 |
+
|
| 981 |
+
if __name__ == "__main__":
|
| 982 |
+
main()
|
deep_search/search_o1/scripts/search_o1_sum_single_page_test_new_prompt.py
ADDED
|
@@ -0,0 +1,997 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# run_search_o1.py
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
import re
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import string
|
| 10 |
+
from typing import Optional, Tuple, List, Dict
|
| 11 |
+
import argparse
|
| 12 |
+
|
| 13 |
+
from transformers import AutoTokenizer
|
| 14 |
+
from vllm import LLM, SamplingParams
|
| 15 |
+
|
| 16 |
+
from bing_search import (
|
| 17 |
+
bing_web_search,
|
| 18 |
+
extract_relevant_info,
|
| 19 |
+
fetch_page_content,
|
| 20 |
+
extract_snippet_with_context
|
| 21 |
+
)
|
| 22 |
+
from evaluate import (
|
| 23 |
+
run_evaluation,
|
| 24 |
+
run_evaluation_for_eval,
|
| 25 |
+
extract_answer
|
| 26 |
+
)
|
| 27 |
+
from prompts import (
|
| 28 |
+
get_gpqa_search_o1_instruction,
|
| 29 |
+
get_math_search_o1_instruction,
|
| 30 |
+
get_code_search_o1_instruction,
|
| 31 |
+
get_singleqa_search_o1_instruction,
|
| 32 |
+
get_multiqa_search_o1_instruction,
|
| 33 |
+
get_webpage_to_reasonchain_instruction,
|
| 34 |
+
get_task_instruction_openqa,
|
| 35 |
+
get_task_instruction_math,
|
| 36 |
+
get_task_instruction_multi_choice,
|
| 37 |
+
get_task_instruction_code,
|
| 38 |
+
get_singleqa_search_o1_instruction_1,
|
| 39 |
+
get_multiqa_search_o1_instruction_1,
|
| 40 |
+
get_webpage_to_reasonchain_instruction_1,
|
| 41 |
+
get_math_search_o1_instruction_1,
|
| 42 |
+
get_multiqa_search_o1_instruction_3
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
from openai import OpenAI
|
| 47 |
+
|
| 48 |
+
from add_eval import add_eval
|
| 49 |
+
# Modify OpenAI's API key and API base to use vLLM's API server.
|
| 50 |
+
# 使用 vLLM 的 API 服务器需要修改 OpenAI 的 API 密钥和 API 库。
|
| 51 |
+
|
| 52 |
+
# openai_api_key = "EMPTY"
|
| 53 |
+
# openai_api_base = "http://localhost:8000/v1"
|
| 54 |
+
# client = OpenAI(
|
| 55 |
+
# api_key=openai_api_key,
|
| 56 |
+
# base_url=openai_api_base,
|
| 57 |
+
# )
|
| 58 |
+
|
| 59 |
+
# Define special tokens
|
| 60 |
+
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
|
| 61 |
+
END_SEARCH_QUERY = "<|end_search_query|>"
|
| 62 |
+
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
|
| 63 |
+
END_SEARCH_RESULT = "<|end_search_result|>"
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# os.environ['http_proxy'] = 'http://127.0.0.1:7880'
|
| 67 |
+
# os.environ['https_proxy'] = 'http://127.0.0.1:7880'
|
| 68 |
+
|
| 69 |
+
# 增加了cache共享和has answer评测,truncate doc
|
| 70 |
+
|
| 71 |
+
def parse_args():
|
| 72 |
+
parser = argparse.ArgumentParser(description="Run Search O1 for various datasets and models.")
|
| 73 |
+
|
| 74 |
+
# Dataset and split configuration
|
| 75 |
+
parser.add_argument(
|
| 76 |
+
'--dataset_name',
|
| 77 |
+
type=str,
|
| 78 |
+
required=True,
|
| 79 |
+
choices=['eval', 'simpleqa','chinese_simpleqa','gpqa', 'math500', 'aime', 'amc', 'livecode', 'nq', 'triviaqa', 'hotpotqa', '2wiki', 'musique', 'bamboogle'],
|
| 80 |
+
help="Name of the dataset to use."
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
parser.add_argument(
|
| 84 |
+
'--split',
|
| 85 |
+
type=str,
|
| 86 |
+
required=True,
|
| 87 |
+
choices=['test', 'diamond', 'main', 'extended', 'gen'],
|
| 88 |
+
help="Dataset split to use."
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
parser.add_argument(
|
| 92 |
+
'--subset_num',
|
| 93 |
+
type=int,
|
| 94 |
+
default=-1,
|
| 95 |
+
help="Number of examples to process. Defaults to all if not specified."
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
# Search and document retrieval configuration
|
| 99 |
+
parser.add_argument(
|
| 100 |
+
'--max_search_limit',
|
| 101 |
+
type=int,
|
| 102 |
+
default=10,
|
| 103 |
+
help="Maximum number of searches per question."
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
parser.add_argument(
|
| 107 |
+
'--max_turn',
|
| 108 |
+
type=int,
|
| 109 |
+
default=15,
|
| 110 |
+
help="Maximum number of turns."
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
parser.add_argument( # 使用搜索引擎时,返回的最大文档数
|
| 114 |
+
'--top_k',
|
| 115 |
+
type=int,
|
| 116 |
+
default=10,
|
| 117 |
+
help="Maximum number of search documents to return."
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
parser.add_argument(
|
| 121 |
+
'--max_doc_len',
|
| 122 |
+
type=int,
|
| 123 |
+
default=3000,
|
| 124 |
+
help="Maximum length of each searched document."
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
# parser.add_argument(
|
| 128 |
+
# '--use_jina',
|
| 129 |
+
# type=bool,
|
| 130 |
+
# default=False,
|
| 131 |
+
# help="Whether to use Jina API for document fetching."
|
| 132 |
+
# )
|
| 133 |
+
parser.add_argument(
|
| 134 |
+
'--use_jina',
|
| 135 |
+
action='store_true',
|
| 136 |
+
help="Whether to use Jina API for document fetching."
|
| 137 |
+
)
|
| 138 |
+
|
| 139 |
+
parser.add_argument(
|
| 140 |
+
'--jina_api_key',
|
| 141 |
+
type=str,
|
| 142 |
+
default='None',
|
| 143 |
+
help="Your Jina API Key to Fetch URL Content."
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
# Model configuration
|
| 147 |
+
parser.add_argument(
|
| 148 |
+
'--model_path',
|
| 149 |
+
type=str,
|
| 150 |
+
required=True,
|
| 151 |
+
help="Path to the pre-trained model."
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
# Sampling parameters
|
| 155 |
+
parser.add_argument(
|
| 156 |
+
'--temperature',
|
| 157 |
+
type=float,
|
| 158 |
+
default=0.7,
|
| 159 |
+
help="Sampling temperature."
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
parser.add_argument(
|
| 163 |
+
'--top_p',
|
| 164 |
+
type=float,
|
| 165 |
+
default=0.8,
|
| 166 |
+
help="Top-p sampling parameter."
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
parser.add_argument(
|
| 170 |
+
'--top_k_sampling',
|
| 171 |
+
type=int,
|
| 172 |
+
default=20,
|
| 173 |
+
help="Top-k sampling parameter."
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
parser.add_argument(
|
| 177 |
+
'--repetition_penalty',
|
| 178 |
+
type=float,
|
| 179 |
+
default=None,
|
| 180 |
+
help="Repetition penalty. If not set, defaults based on the model."
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
parser.add_argument(
|
| 184 |
+
'--max_tokens',
|
| 185 |
+
type=int,
|
| 186 |
+
default=32768,
|
| 187 |
+
help="Maximum number of tokens to generate. If not set, defaults based on the model and dataset."
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
# Bing API Configuration
|
| 191 |
+
parser.add_argument(
|
| 192 |
+
'--bing_subscription_key',
|
| 193 |
+
type=str,
|
| 194 |
+
required=True,
|
| 195 |
+
help="Bing Search API subscription key."
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
parser.add_argument(
|
| 199 |
+
'--bing_endpoint',
|
| 200 |
+
type=str,
|
| 201 |
+
default="https://api.bing.microsoft.com/v7.0/search",
|
| 202 |
+
help="Bing Search API endpoint."
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
parser.add_argument(
|
| 206 |
+
'--cache_dir_base',
|
| 207 |
+
type=str,
|
| 208 |
+
required=True,
|
| 209 |
+
help="cache path."
|
| 210 |
+
)
|
| 211 |
+
|
| 212 |
+
parser.add_argument(
|
| 213 |
+
'--output_dir_base',
|
| 214 |
+
type=str,
|
| 215 |
+
required=True,
|
| 216 |
+
help="output_dir"
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
parser.add_argument(
|
| 220 |
+
'--is_exclude_urls',
|
| 221 |
+
action="store_true",
|
| 222 |
+
help="is_exclude_urls"
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
# parser.add_argument(
|
| 226 |
+
# '--model_doc_reason_path',
|
| 227 |
+
# type=str,
|
| 228 |
+
# required=True,
|
| 229 |
+
# help="Path to the document reasoning model."
|
| 230 |
+
# )
|
| 231 |
+
|
| 232 |
+
# openai_api_base
|
| 233 |
+
# parser.add_argument(
|
| 234 |
+
# '--openai_api_base',
|
| 235 |
+
# type=str,
|
| 236 |
+
# required=True,
|
| 237 |
+
# help="openai_api_base"
|
| 238 |
+
# )
|
| 239 |
+
# parser.add_argument(
|
| 240 |
+
# '--data_path',
|
| 241 |
+
# type=str,
|
| 242 |
+
# required=True,
|
| 243 |
+
# help="Path to the document reasoning model."
|
| 244 |
+
# )
|
| 245 |
+
return parser.parse_args()
|
| 246 |
+
|
| 247 |
+
def main():
|
| 248 |
+
args = parse_args()
|
| 249 |
+
print(f"args.use_jina: {args.use_jina}")
|
| 250 |
+
# Extract arguments
|
| 251 |
+
dataset_name = args.dataset_name
|
| 252 |
+
split = args.split
|
| 253 |
+
subset_num = args.subset_num
|
| 254 |
+
MAX_SEARCH_LIMIT = args.max_search_limit
|
| 255 |
+
MAX_TURN = args.max_turn
|
| 256 |
+
top_k = args.top_k
|
| 257 |
+
max_doc_len = args.max_doc_len
|
| 258 |
+
model_path = args.model_path
|
| 259 |
+
# model_doc_reason_path = args.model_doc_reason_path
|
| 260 |
+
temperature = args.temperature
|
| 261 |
+
top_p = args.top_p
|
| 262 |
+
top_k_sampling = args.top_k_sampling
|
| 263 |
+
repetition_penalty = args.repetition_penalty
|
| 264 |
+
max_tokens = args.max_tokens
|
| 265 |
+
bing_subscription_key = args.bing_subscription_key
|
| 266 |
+
bing_endpoint = args.bing_endpoint
|
| 267 |
+
use_jina = args.use_jina
|
| 268 |
+
jina_api_key = args.jina_api_key
|
| 269 |
+
cache_dir_base = args.cache_dir_base
|
| 270 |
+
output_dir_base = args.output_dir_base
|
| 271 |
+
is_exclude_urls = args.is_exclude_urls
|
| 272 |
+
# openai_api_base = args.openai_api_base
|
| 273 |
+
use_jina = False
|
| 274 |
+
print(f"use_jina: {use_jina}")
|
| 275 |
+
|
| 276 |
+
print(f"CUDA_VISIBLE_DEVICES is set to: {os.environ['CUDA_VISIBLE_DEVICES']}")
|
| 277 |
+
|
| 278 |
+
# openai_api_key = "EMPTY"
|
| 279 |
+
# openai_api_base = openai_api_base
|
| 280 |
+
# client = OpenAI(
|
| 281 |
+
# api_key=openai_api_key,
|
| 282 |
+
# base_url=openai_api_base,
|
| 283 |
+
# )
|
| 284 |
+
|
| 285 |
+
# Adjust parameters based on dataset
|
| 286 |
+
if dataset_name in ['eval', 'chinese_simpleqa', 'simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki', 'medmcqa', 'pubhealth']:
|
| 287 |
+
MAX_SEARCH_LIMIT = 5
|
| 288 |
+
if dataset_name in ['eval', 'chinese_simpleqa', 'simpleqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 289 |
+
MAX_SEARCH_LIMIT = 10
|
| 290 |
+
MAX_TURN = 15
|
| 291 |
+
top_k = 10
|
| 292 |
+
max_doc_len = 3000
|
| 293 |
+
|
| 294 |
+
if args.jina_api_key == 'None':
|
| 295 |
+
jina_api_key = None
|
| 296 |
+
|
| 297 |
+
# Set default repetition_penalty if not provided
|
| 298 |
+
if repetition_penalty is None:
|
| 299 |
+
repetition_penalty = 1.05 if 'qwq' in model_path.lower() else 1.0
|
| 300 |
+
|
| 301 |
+
# Data paths based on dataset
|
| 302 |
+
if split == "test": # 测试用的数据集地址
|
| 303 |
+
data_path = f"./data/test/{dataset_name}.json"
|
| 304 |
+
elif split == "gen":
|
| 305 |
+
data_path = f"./data/gen/{dataset_name}.json"
|
| 306 |
+
else: # 训练用的数据集地址
|
| 307 |
+
if dataset_name == 'livecode':
|
| 308 |
+
data_path = f'./data/LiveCodeBench/{split}.json'
|
| 309 |
+
elif dataset_name in ['math500', 'gpqa', 'aime', 'amc']:
|
| 310 |
+
data_path = f'./data/{dataset_name.upper()}/{split}.json'
|
| 311 |
+
else:
|
| 312 |
+
data_path = f'./data/QA_Datasets/{dataset_name}.json'
|
| 313 |
+
|
| 314 |
+
print('-----------------------')
|
| 315 |
+
print(f'Using {dataset_name} {split} set.')
|
| 316 |
+
print('-----------------------')
|
| 317 |
+
|
| 318 |
+
# ---------------------- Caching Mechanism ----------------------
|
| 319 |
+
# Define cache directories and file paths
|
| 320 |
+
# cache_dir = './cache'
|
| 321 |
+
model_name = model_path.split('/')[-1].replace('-instruct', '')
|
| 322 |
+
# cache_dir = f'./{cache_dir_base}_{dataset_name}_{model_name}'
|
| 323 |
+
cache_dir = cache_dir_base
|
| 324 |
+
search_cache_path = os.path.join(cache_dir, 'search_cache.json')
|
| 325 |
+
url_cache_path = os.path.join(cache_dir, 'url_cache.json')
|
| 326 |
+
|
| 327 |
+
# Ensure cache directory exists
|
| 328 |
+
os.makedirs(cache_dir, exist_ok=True)
|
| 329 |
+
|
| 330 |
+
# Load existing caches or initialize empty dictionaries
|
| 331 |
+
if os.path.exists(search_cache_path):
|
| 332 |
+
with open(search_cache_path, 'r', encoding='utf-8') as f:
|
| 333 |
+
search_cache = json.load(f)
|
| 334 |
+
else:
|
| 335 |
+
search_cache = {}
|
| 336 |
+
|
| 337 |
+
if os.path.exists(url_cache_path):
|
| 338 |
+
with open(url_cache_path, 'r', encoding='utf-8') as f:
|
| 339 |
+
url_cache = json.load(f)
|
| 340 |
+
else:
|
| 341 |
+
url_cache = {}
|
| 342 |
+
|
| 343 |
+
# Function to save caches
|
| 344 |
+
def save_caches():
|
| 345 |
+
with open(search_cache_path, 'w', encoding='utf-8') as f:
|
| 346 |
+
json.dump(search_cache, f, ensure_ascii=False, indent=2)
|
| 347 |
+
with open(url_cache_path, 'w', encoding='utf-8') as f:
|
| 348 |
+
json.dump(url_cache, f, ensure_ascii=False, indent=2)
|
| 349 |
+
|
| 350 |
+
# ---------------------- Model Loading ----------------------
|
| 351 |
+
print(f"Loading tokenizer from {model_path}...")
|
| 352 |
+
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
| 353 |
+
if tokenizer.pad_token is None:
|
| 354 |
+
tokenizer.pad_token = tokenizer.eos_token
|
| 355 |
+
tokenizer.padding_side = 'left' # 主要是左填充
|
| 356 |
+
print("Tokenizer loaded successfully.")
|
| 357 |
+
|
| 358 |
+
# Define output directory based on model and dataset
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
# if 'qwq' in model_path.lower():
|
| 362 |
+
# if dataset_name in ['math500', 'gpqa', 'aime', 'amc', 'livecode']:
|
| 363 |
+
# output_dir = f'./{output_dir_base}/{dataset_name}.qwq.search_o1'
|
| 364 |
+
# if dataset_name == 'gpqa' and (MAX_SEARCH_LIMIT != 5 or top_k != 10):
|
| 365 |
+
# output_dir = f'./{output_dir_base}/runs.analysis/{dataset_name}.qwq.search_o1.{MAX_SEARCH_LIMIT}.{top_k}'
|
| 366 |
+
# else:
|
| 367 |
+
# output_dir = f'./{output_dir_base}/runs.qa/{dataset_name}.qwq.search_o1'
|
| 368 |
+
# else:
|
| 369 |
+
# model_short_name = model_path.split('/')[-1].lower().replace('-instruct', '')
|
| 370 |
+
# output_dir = f'./{output_dir_base}/runs.baselines/{dataset_name}.{model_short_name}.search_o1'
|
| 371 |
+
output_dir = output_dir_base
|
| 372 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 373 |
+
|
| 374 |
+
print(f"Loading model from {model_path}...")
|
| 375 |
+
print(f"device_count: {torch.cuda.device_count()}")
|
| 376 |
+
|
| 377 |
+
# Initialize the LLM
|
| 378 |
+
llm = LLM(
|
| 379 |
+
model=model_path,
|
| 380 |
+
tensor_parallel_size=torch.cuda.device_count(),
|
| 381 |
+
gpu_memory_utilization=0.95,
|
| 382 |
+
|
| 383 |
+
)
|
| 384 |
+
print("Model loaded successfully.")
|
| 385 |
+
|
| 386 |
+
# # ----------------------Loading model to reason in document ----------------------
|
| 387 |
+
|
| 388 |
+
# print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
|
| 389 |
+
# tokenizer_doc_reason = AutoTokenizer.from_pretrained(model_doc_reason_path, trust_remote_code=True)
|
| 390 |
+
# if tokenizer_doc_reason.pad_token is None:
|
| 391 |
+
# tokenizer_doc_reason.pad_token = tokenizer_doc_reason.eos_token
|
| 392 |
+
# tokenizer_doc_reason.padding_side = 'left' # 主要是左填充
|
| 393 |
+
# print("tokenizer_doc_reason loaded successfully.")
|
| 394 |
+
|
| 395 |
+
# print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
|
| 396 |
+
|
| 397 |
+
# # Initialize the LLM
|
| 398 |
+
# # torch.cuda.set_device(6,7)
|
| 399 |
+
|
| 400 |
+
# llm_doc_reason = LLM(
|
| 401 |
+
# model=model_doc_reason_path,
|
| 402 |
+
# tensor_parallel_size=2,
|
| 403 |
+
# gpu_memory_utilization=0.95,
|
| 404 |
+
|
| 405 |
+
# )
|
| 406 |
+
# print("Model_doc_reason loaded successfully.")
|
| 407 |
+
|
| 408 |
+
# ---------------------- Data Loading ----------------------
|
| 409 |
+
print(f"Loading data from {data_path}...")
|
| 410 |
+
with open(data_path, 'r', encoding='utf-8') as json_file:
|
| 411 |
+
filtered_data = json.load(json_file)
|
| 412 |
+
print(f"Data loaded successfully. Total examples: {len(filtered_data)}")
|
| 413 |
+
|
| 414 |
+
# ---------------------- Batch Generation Function ----------------------
|
| 415 |
+
def generate_webpage_to_reasonchain_batch( # 模型基于网页内容生成推理,然后从模型的回复中提取答案
|
| 416 |
+
original_questions: List[str],
|
| 417 |
+
prev_reasonings: List[str],
|
| 418 |
+
search_queries: List[str],
|
| 419 |
+
documents: List[List[str]],
|
| 420 |
+
dataset_name: str,
|
| 421 |
+
batch_output_records: List[Dict], # New parameter to collect outputs
|
| 422 |
+
max_tokens: int = 32768,
|
| 423 |
+
coherent: bool = False,
|
| 424 |
+
) -> List[str]:
|
| 425 |
+
|
| 426 |
+
# if "Qwen2.5" in model_path:
|
| 427 |
+
# max_tokens = 8192
|
| 428 |
+
|
| 429 |
+
# encode_docs = tokenizer(documents, truncation=True, max_length=20000, add_special_tokens=False)["input_ids"]
|
| 430 |
+
# documents = tokenizer.batch_decode(encode_docs)
|
| 431 |
+
|
| 432 |
+
# # 统计每个文档的长度
|
| 433 |
+
# doc_lengths = [len(doc) for doc in encode_docs]
|
| 434 |
+
|
| 435 |
+
# # # 打印每个文档的长度
|
| 436 |
+
# # for i, length in enumerate(doc_lengths):
|
| 437 |
+
# # print(f"Document {i + 1}: {length} tokens")
|
| 438 |
+
|
| 439 |
+
# # 如果需要返回长度列表,可以直接使用 doc_lengths
|
| 440 |
+
# print(f"for {model_path}, set max_tokens={max_tokens} for doc gen, truncate documnets. ")
|
| 441 |
+
# print("All document lengths:", doc_lengths)
|
| 442 |
+
|
| 443 |
+
user_prompts = []
|
| 444 |
+
assert len(original_questions) == len(prev_reasonings) == len(search_queries) == len(documents), "Input lists must have the same length"
|
| 445 |
+
|
| 446 |
+
questions_num = len(original_questions)
|
| 447 |
+
|
| 448 |
+
doc_interval = [] # 维护每个query对应的doc去区间
|
| 449 |
+
doc_interval.append(0)
|
| 450 |
+
for i, doc_str_list in enumerate(documents):
|
| 451 |
+
# assert len(doc_str_list) == top_k, f"Expected {top_k} documents, but got {len(doc_str_list)}"
|
| 452 |
+
doc_interval.append(doc_interval[-1]+len(doc_str_list)) # i,i+1为当前query的doc区间
|
| 453 |
+
for j, doc_str in enumerate(doc_str_list):
|
| 454 |
+
r = prev_reasonings[i]
|
| 455 |
+
sq = search_queries[i]
|
| 456 |
+
user_prompts.append(get_webpage_to_reasonchain_instruction_1(r, sq, doc_str))
|
| 457 |
+
|
| 458 |
+
# user_prompts = [ # 根据之前的推理,搜索query和搜索到的doc生成用户提示
|
| 459 |
+
# get_webpage_to_reasonchain_instruction(r, sq, doc)
|
| 460 |
+
# for r, sq, doc in zip(prev_reasonings, search_queries, documents)
|
| 461 |
+
# ]
|
| 462 |
+
|
| 463 |
+
prompts = [{"role": "user", "content": up} for up in user_prompts]
|
| 464 |
+
prompts = [tokenizer.apply_chat_template([p], tokenize=False, add_generation_prompt=True) for p in prompts]
|
| 465 |
+
|
| 466 |
+
output = llm.generate( # 生成模型回复
|
| 467 |
+
prompts,
|
| 468 |
+
sampling_params=SamplingParams(
|
| 469 |
+
max_tokens=max_tokens,
|
| 470 |
+
temperature=0.7,
|
| 471 |
+
top_p=0.8,
|
| 472 |
+
top_k=20,
|
| 473 |
+
repetition_penalty=1.05,
|
| 474 |
+
)
|
| 475 |
+
)
|
| 476 |
+
|
| 477 |
+
raw_outputs = [out.outputs[0].text for out in output]
|
| 478 |
+
extracted_infos = [extract_answer(raw, mode='infogen') for raw in raw_outputs] # 提取模型基于网页生成的推理
|
| 479 |
+
|
| 480 |
+
formatted_infos = []
|
| 481 |
+
formatted_outputs = []
|
| 482 |
+
formatted_prompts = []
|
| 483 |
+
|
| 484 |
+
for i in range(questions_num): # 将每个query对应的doc区间划分
|
| 485 |
+
start_idx = doc_interval[i]
|
| 486 |
+
end_idx = doc_interval[i+1]
|
| 487 |
+
selected_prompts = prompts[start_idx:end_idx]
|
| 488 |
+
selected_extracted_infos = extracted_infos[start_idx:end_idx]
|
| 489 |
+
selected_raw_outputs = raw_outputs[start_idx:end_idx]
|
| 490 |
+
|
| 491 |
+
formatted_info = ""
|
| 492 |
+
for id, info in enumerate(selected_extracted_infos):
|
| 493 |
+
formatted_info += f"Summary of Web Page {id}:\n{info}\n\n"
|
| 494 |
+
formatted_infos.append(formatted_info)
|
| 495 |
+
formatted_outputs.append(selected_raw_outputs)
|
| 496 |
+
formatted_prompts.append(selected_prompts)
|
| 497 |
+
|
| 498 |
+
if i == questions_num - 1: # 最后一个
|
| 499 |
+
assert i+1 == len(doc_interval)-1, "原始问题数目应等于doc_interval的长度减一"
|
| 500 |
+
|
| 501 |
+
# for i, (p, e, r) in enumerate(zip(prompts, extracted_infos, raw_outputs)):
|
| 502 |
+
# if i % top_k == 0 and i != 0:
|
| 503 |
+
# formatted_infos.append(formatted_info)
|
| 504 |
+
# formatted_outputs.append(formatted_output)
|
| 505 |
+
# formatted_prompts.append(formatted_prompt)
|
| 506 |
+
# formatted_info = ""
|
| 507 |
+
# formatted_output = []
|
| 508 |
+
# formatted_prompt = []
|
| 509 |
+
|
| 510 |
+
# formatted_info += f"Summary of Web Page {i % top_k + 1}:\n{e}\n\n"
|
| 511 |
+
# formatted_output.append(r)
|
| 512 |
+
# formatted_prompt.append(p)
|
| 513 |
+
|
| 514 |
+
# # 最后一组
|
| 515 |
+
# formatted_infos.append(formatted_info)
|
| 516 |
+
# formatted_outputs.append(formatted_output)
|
| 517 |
+
# formatted_prompts.append(formatted_prompt)
|
| 518 |
+
|
| 519 |
+
assert len(formatted_infos) == len(formatted_outputs) == len(formatted_prompts) == len(original_questions), "Number of formatted_infos and formatted_outputs must match number of questions"
|
| 520 |
+
for i, (p, r, e) in enumerate(zip(formatted_prompts, formatted_outputs, formatted_infos)):
|
| 521 |
+
batch_output_records.append({
|
| 522 |
+
'prompt': p,
|
| 523 |
+
'raw_output': r,
|
| 524 |
+
'extracted_info': e
|
| 525 |
+
})
|
| 526 |
+
|
| 527 |
+
return formatted_infos
|
| 528 |
+
|
| 529 |
+
# ---------------------- Preparation of Input Prompts ----------------------
|
| 530 |
+
input_list = []
|
| 531 |
+
for item in filtered_data: # 生成prompts
|
| 532 |
+
question = item['Question']
|
| 533 |
+
|
| 534 |
+
if dataset_name in ['eval', 'chinese_simpleqa', 'simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 535 |
+
if dataset_name in ['nq', 'triviaqa']:
|
| 536 |
+
instruction = get_singleqa_search_o1_instruction_1(MAX_SEARCH_LIMIT)
|
| 537 |
+
elif dataset_name in ['eval', 'chinese_simpleqa', 'simpleqa','hotpotqa', 'musique', 'bamboogle', '2wiki']:
|
| 538 |
+
instruction = get_multiqa_search_o1_instruction_3(MAX_SEARCH_LIMIT)
|
| 539 |
+
if 'qwq' in model_path.lower():
|
| 540 |
+
user_prompt = get_task_instruction_openqa(question, model_name='qwq')
|
| 541 |
+
else:
|
| 542 |
+
user_prompt = get_task_instruction_openqa(question)
|
| 543 |
+
|
| 544 |
+
elif dataset_name in ['math500', 'aime', 'amc']:
|
| 545 |
+
instruction = get_math_search_o1_instruction_1(MAX_SEARCH_LIMIT)
|
| 546 |
+
if 'qwq' in model_path.lower():
|
| 547 |
+
user_prompt = get_task_instruction_math(question, model_name='qwq')
|
| 548 |
+
else:
|
| 549 |
+
user_prompt = get_task_instruction_math(question)
|
| 550 |
+
|
| 551 |
+
elif dataset_name == 'gpqa':
|
| 552 |
+
instruction = get_gpqa_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 553 |
+
if 'qwq' in model_path.lower():
|
| 554 |
+
user_prompt = get_task_instruction_multi_choice(question, model_name='qwq')
|
| 555 |
+
elif 'llama' in model_path.lower():
|
| 556 |
+
user_prompt = get_task_instruction_multi_choice(question, model_name='llama')
|
| 557 |
+
else:
|
| 558 |
+
user_prompt = get_task_instruction_multi_choice(question)
|
| 559 |
+
|
| 560 |
+
elif dataset_name == 'livecode':
|
| 561 |
+
instruction = get_code_search_o1_instruction(MAX_SEARCH_LIMIT)
|
| 562 |
+
question_title = item.get('question_title', '')
|
| 563 |
+
if 'qwq' in model_path.lower():
|
| 564 |
+
user_prompt = get_task_instruction_code(question, question_title=question_title, model_name='qwq')
|
| 565 |
+
else:
|
| 566 |
+
user_prompt = get_task_instruction_code(question)
|
| 567 |
+
else:
|
| 568 |
+
user_prompt = "" # Default to empty if dataset not matched
|
| 569 |
+
|
| 570 |
+
prompt = [{"role": "user", "content": instruction + user_prompt}] # instruction是告诉模型怎么进行搜索,user_prompt是用户具体问题
|
| 571 |
+
prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
|
| 572 |
+
input_list.append(prompt)
|
| 573 |
+
|
| 574 |
+
if subset_num != -1:
|
| 575 |
+
input_list = input_list[:subset_num]
|
| 576 |
+
filtered_data = filtered_data[:subset_num]
|
| 577 |
+
|
| 578 |
+
# Initialize active sequences
|
| 579 |
+
active_sequences = [{ # 记录每个问题的搜索历史
|
| 580 |
+
'item': item,
|
| 581 |
+
'prompt': prompt,
|
| 582 |
+
'output': '',
|
| 583 |
+
'finished': False, # 一开始均为未完成
|
| 584 |
+
'history': [],
|
| 585 |
+
'search_count': 0,
|
| 586 |
+
'executed_search_queries': set(),
|
| 587 |
+
'all_info': [],
|
| 588 |
+
} for item, prompt in zip(filtered_data, input_list)]
|
| 589 |
+
|
| 590 |
+
# ---------------------- Set Max Tokens ----------------------
|
| 591 |
+
# if 'qwq' in model_path.lower():
|
| 592 |
+
# if dataset_name in ['aime', 'amc', 'livecode']:
|
| 593 |
+
# max_tokens = 32768
|
| 594 |
+
# else:
|
| 595 |
+
# max_tokens = 20480
|
| 596 |
+
# else:
|
| 597 |
+
# max_tokens = 8192
|
| 598 |
+
# max_tokens = 16384
|
| 599 |
+
if dataset_name in ['aime', 'amc', 'livecode']:
|
| 600 |
+
max_tokens = 32768
|
| 601 |
+
else:
|
| 602 |
+
max_tokens = 20480
|
| 603 |
+
# ---------------------- Generation Function ----------------------
|
| 604 |
+
def run_generation(sequences: List[Dict], max_tokens: int) -> List:
    """Run one vLLM generation pass over the prompts of the given sequences.

    Decoding stops either at EOS or at END_SEARCH_QUERY; the stop string is
    kept in the output so the caller can detect a pending search request.
    """
    batch_prompts = [entry['prompt'] for entry in sequences]
    params = SamplingParams(
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k_sampling,
        repetition_penalty=repetition_penalty,
        stop=[END_SEARCH_QUERY, tokenizer.eos_token],
        include_stop_str_in_output=True,
    )
    # One batched call; vLLM returns one result object per prompt, in order.
    generations = llm.generate(batch_prompts, sampling_params=params)
    print(f"run_generation completed {len(generations)}")
    return generations
|
| 618 |
+
|
| 619 |
+
# Function to extract text between two tags 提取位于 start_tag 和 end_tag 之间的内容
|
| 620 |
+
def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
    """Return the stripped content of the LAST start_tag...end_tag span in text.

    Matching is non-greedy and spans newlines (DOTALL). Returns None when no
    complete span is present.
    """
    span_re = re.compile(re.escape(start_tag) + r"(.*?)" + re.escape(end_tag),
                         flags=re.DOTALL)
    last_hit = None
    for hit in span_re.finditer(text):
        last_hit = hit.group(1)
    if last_hit is None:
        return None
    return last_hit.strip()
|
| 626 |
+
|
| 627 |
+
def replace_recent_steps(origin_str, replace_str):
    """Merge edited reasoning steps into the original reasoning text.

    Both arguments are texts composed of "Step N: ..." sections. Steps found
    in replace_str overwrite (or, when their content contains
    "DELETE THIS STEP", remove) the corresponding steps of origin_str; any
    remaining steps are emitted in ascending step order, contents only,
    separated by blank lines.

    Parameters:
    - origin_str (str): the original reasoning steps.
    - replace_str (str): the replacement/deletion steps.

    Returns:
    - str: the updated reasoning steps.
    """

    def parse_steps(text):
        """Split text into a {step_number: step_content} dict keyed on "Step N:" headers."""
        header_re = re.compile(r"Step\s+(\d+):\s*")
        parsed = {}
        active_num = None
        buffered = []
        for raw_line in text.splitlines():
            header = header_re.match(raw_line)
            if header is None:
                # Continuation line: attach it to the step being read.
                # Text appearing before the first header is discarded.
                if active_num is not None:
                    buffered.append(raw_line)
                continue
            # New header: flush the previously accumulated step first.
            if active_num is not None:
                parsed[active_num] = "\n".join(buffered).strip()
            active_num = int(header.group(1))
            first_chunk = raw_line[header.end():].strip()
            buffered = [first_chunk] if first_chunk else []
        # Flush the final step, if any.
        if active_num is not None:
            parsed[active_num] = "\n".join(buffered).strip()
        return parsed

    merged = parse_steps(origin_str)
    for num, body in parse_steps(replace_str).items():
        if "DELETE THIS STEP" in body:
            # Remove the step when present; a delete for a missing step is a no-op.
            merged.pop(num, None)
        else:
            # Replace an existing step or add a brand-new one.
            merged[num] = body

    # Emit surviving step contents in step-number order, blank-line separated.
    ordered = sorted(merged.items())
    return "\n\n".join(body for _, body in ordered)
|
| 698 |
+
|
| 699 |
+
# ---------------------- Initialize Collection Structure ----------------------
|
| 700 |
+
# Initialize a list to collect batch outputs
|
| 701 |
+
batch_output_records = []
|
| 702 |
+
|
| 703 |
+
start_time = time.time()
|
| 704 |
+
turn = 0
|
| 705 |
+
|
| 706 |
+
# 流程
|
| 707 |
+
# 首先根据prompt让模型生成回复
|
| 708 |
+
# 从模型的回复中提取搜索查询
|
| 709 |
+
# 如果有(回复要以END_SEARCH_QUERY结尾)
|
| 710 |
+
# 根据搜索查询,从互联网上提取相关信息
|
| 711 |
+
# 处理查询的信息
|
| 712 |
+
# 让模型基于之前的步骤,检索query和查询到的信息生成新的推理,得到search result
|
| 713 |
+
# 然后回到第一步(这里模型就会根据前面的search result,再次生成新的回复
|
| 714 |
+
# 如果没有查询则该条问题结束
|
| 715 |
+
|
| 716 |
+
# Main loop until all sequences are finished or maximum turns reached
|
| 717 |
+
while True:
|
| 718 |
+
# Identify sequences that need generation
|
| 719 |
+
sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']] # 筛选出需要生成的新内容的序列,active_sequences 是一个包含所有活跃序列的列表
|
| 720 |
+
|
| 721 |
+
if sequences_needing_generation:
|
| 722 |
+
turn += 1
|
| 723 |
+
print(f'\n-------------- Turn {turn} --------------')
|
| 724 |
+
print(f"We have {len(sequences_needing_generation)} sequences needing generation...")
|
| 725 |
+
outputs = run_generation(sequences_needing_generation, max_tokens) # 根据prompt
|
| 726 |
+
print("Generation completed, processing outputs...")
|
| 727 |
+
|
| 728 |
+
# Initialize batch variables
|
| 729 |
+
batch_relevant_info = []
|
| 730 |
+
batch_original_questions = []
|
| 731 |
+
batch_prev_reasonings = []
|
| 732 |
+
batch_search_queries = []
|
| 733 |
+
batch_documents = []
|
| 734 |
+
batch_sequences = []
|
| 735 |
+
|
| 736 |
+
# Collect URLs to fetch across all sequences
|
| 737 |
+
all_urls_to_fetch = set() # 初始化一个集合 all_urls_to_fetch 用来收集所有需要获取的 URL
|
| 738 |
+
url_snippets = {}
|
| 739 |
+
url_sequence_map = {} # Map URL to list of sequences needing it
|
| 740 |
+
|
| 741 |
+
# Process each sequence and collect URLs
|
| 742 |
+
for seq, out in zip(sequences_needing_generation, outputs): # 遍历需要生成新内容的序列,并生成新内容,同时收集需要获取的 URL
|
| 743 |
+
text = out.outputs[0].text # 将生成的文本添加到序列的历史记录、提示和输出中
|
| 744 |
+
seq['history'].append(text)
|
| 745 |
+
# Append generated text to prompt and output
|
| 746 |
+
seq['prompt'] += text
|
| 747 |
+
seq['output'] += text
|
| 748 |
+
seq['all_info'].append({f"turn_{turn}_reason": text})
|
| 749 |
+
# Extract search query
|
| 750 |
+
search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY) # 提取搜索查询
|
| 751 |
+
|
| 752 |
+
# If a search query is present and needs to be executed
|
| 753 |
+
if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
|
| 754 |
+
if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
|
| 755 |
+
# Execute search, use cache if available
|
| 756 |
+
if search_query in search_cache:
|
| 757 |
+
results = search_cache[search_query] # 如果���索查询结果在缓存中存在,则从缓存中获取结果
|
| 758 |
+
print(f"Using cached search results for query: \"{search_query}\"")
|
| 759 |
+
else:
|
| 760 |
+
try:
|
| 761 |
+
if is_exclude_urls and "urls" in seq["item"]["metadata"]:
|
| 762 |
+
print(f"is_exclude_urls: {is_exclude_urls}")
|
| 763 |
+
exclude_urls = seq["item"]["metadata"]["urls"]
|
| 764 |
+
else:
|
| 765 |
+
exclude_urls = []
|
| 766 |
+
|
| 767 |
+
print(f"Execute and cache search for query: \"{search_query}\"")
|
| 768 |
+
results = bing_web_search(search_query, bing_subscription_key, bing_endpoint, market='en-US', language='en', exclude_urls=exclude_urls) # 执行搜索
|
| 769 |
+
search_cache[search_query] = results # 将搜索结果添加到缓存中
|
| 770 |
+
print(f"Executed and cached search for query: \"{search_query}\"")
|
| 771 |
+
except Exception as e:
|
| 772 |
+
print(f"Error during search query '{search_query}': {e}")
|
| 773 |
+
search_cache[search_query] = {}
|
| 774 |
+
results = {}
|
| 775 |
+
|
| 776 |
+
# Extract relevant information from Bing search results
|
| 777 |
+
relevant_info = extract_relevant_info(results)[:top_k] # 从搜索结果中提取出最相关的信息
|
| 778 |
+
print(f"len relevant_info: {len(relevant_info)}")
|
| 779 |
+
seq['relevant_info'] = relevant_info
|
| 780 |
+
|
| 781 |
+
# Extract URLs and snippets
|
| 782 |
+
urls_to_fetch = [it['url'] for it in relevant_info] # 从搜索结果中提取出所有 URL
|
| 783 |
+
snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info} # 创建一个字典 snippets,将 URL 映射到它们对应的片段(snippet)。如果 snippet 字段存在,则把它加入字典
|
| 784 |
+
|
| 785 |
+
# Filter URLs that are not cached
|
| 786 |
+
urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache] # 筛选出所有没有被缓存的 UR
|
| 787 |
+
cached_urls = [u for u in urls_to_fetch if u in url_cache] # 选出已经缓存的 URL(即在 url_cache 中存在的 URL)。这些 URL 已经被处理过,不需要再次请求
|
| 788 |
+
|
| 789 |
+
# Store info for all_urls_to_fetch and url_snippets
|
| 790 |
+
for url in urls_to_fetch_filtered:
|
| 791 |
+
all_urls_to_fetch.add(url)
|
| 792 |
+
url_snippets[url] = snippets.get(url, "") # 将每个 URL 对应的片段存储到 url_snippets 字典中
|
| 793 |
+
|
| 794 |
+
all_reasoning_steps = seq['output']
|
| 795 |
+
all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n") # 将连续的空行(\n\n)替换为单个换行符(\n),然后按换行符拆分文本,得到每个推理步骤单独的一行
|
| 796 |
+
|
| 797 |
+
truncated_prev_reasoning = ""
|
| 798 |
+
for i, step in enumerate(all_reasoning_steps):
|
| 799 |
+
truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n" # 遍历 all_reasoning_steps 中的每个步骤,并将每个步骤编号和步骤内容格式化后,添加到 truncated_prev_reasoning 字符串中。这样生成一个包含步骤编号和内容的字符串
|
| 800 |
+
|
| 801 |
+
prev_steps = truncated_prev_reasoning.split('\n\n') # 将推理步骤字符串 truncated_prev_reasoning 按照每两个换行符拆分成多个步骤
|
| 802 |
+
if len(prev_steps) <= 5: # 如果步骤的数量不超过 5,直接保留所有步骤
|
| 803 |
+
truncated_prev_reasoning = '\n\n'.join(prev_steps)
|
| 804 |
+
else:
|
| 805 |
+
truncated_prev_reasoning = ''
|
| 806 |
+
for i, step in enumerate(prev_steps): # 如果步骤超过 5 个,进行截断。保留第一个步骤、最后四个步骤以及包含搜索查询或搜索结果的步骤。如果中间的步骤不重要,则用 ... 来表示省略
|
| 807 |
+
if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
|
| 808 |
+
truncated_prev_reasoning += step + '\n\n'
|
| 809 |
+
else: # 如果步骤超过 5 个,进行截断。保留第一个步骤、最后四个步骤以及包含搜索查询或搜索结果的步骤。如果中间的步骤不重要,则用 ... 来表示省略
|
| 810 |
+
if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
|
| 811 |
+
truncated_prev_reasoning += '...\n\n'
|
| 812 |
+
truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
|
| 813 |
+
|
| 814 |
+
# Collect parameters for batch processing
|
| 815 |
+
batch_relevant_info.append(relevant_info) # 搜索出来的信息
|
| 816 |
+
batch_original_questions.append(seq['item']['Question']) # 原始问题
|
| 817 |
+
batch_prev_reasonings.append(truncated_prev_reasoning) # 之前的推理步骤
|
| 818 |
+
batch_search_queries.append(search_query) # 搜索查询
|
| 819 |
+
batch_sequences.append(seq)
|
| 820 |
+
|
| 821 |
+
# Update search count and executed queries
|
| 822 |
+
seq['search_count'] += 1 # 更新搜索计数
|
| 823 |
+
seq['executed_search_queries'].add(search_query) # 将已执行的搜索查询添加到集合中
|
| 824 |
+
|
| 825 |
+
elif seq['search_count'] >= MAX_SEARCH_LIMIT: # 如果搜索次数达到或超过该限制,则返回一条消息,通知该查询无法再进行
|
| 826 |
+
limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
|
| 827 |
+
seq['prompt'] += limit_message
|
| 828 |
+
seq['output'] += limit_message
|
| 829 |
+
seq['history'].append(limit_message)
|
| 830 |
+
seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
|
| 831 |
+
print(f"Search limit reached for query: \"{search_query}\"")
|
| 832 |
+
|
| 833 |
+
elif search_query in seq['executed_search_queries']: # 如果当前查询已经执行过,则返回一个消息,提示用户查询已重复,并引导其查看之前的结果
|
| 834 |
+
limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
|
| 835 |
+
seq['prompt'] += limit_message
|
| 836 |
+
seq['output'] += limit_message
|
| 837 |
+
seq['history'].append(limit_message)
|
| 838 |
+
seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
|
| 839 |
+
print(f"Repeated search for query: \"{search_query}\"")
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
else: # 如果当前序列没有需要执行的搜索查询,则将该序列标记为完成,并打印提示信息
|
| 843 |
+
# If no search query needs to be executed, mark the sequence as finished
|
| 844 |
+
seq['finished'] = True
|
| 845 |
+
print("Sequence marked as complete.")
|
| 846 |
+
|
| 847 |
+
print(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
|
| 848 |
+
print(f"all_urls_to_fetch: {all_urls_to_fetch}")
|
| 849 |
+
# Batch fetch all URLs at once to optimize speed
|
| 850 |
+
|
| 851 |
+
if all_urls_to_fetch:
|
| 852 |
+
print(f"Fetching {len(all_urls_to_fetch)} URLs...")
|
| 853 |
+
try:
|
| 854 |
+
fetched_contents = fetch_page_content( # 一次性获取所有 URL 的中搜索出来的内容
|
| 855 |
+
list(all_urls_to_fetch),
|
| 856 |
+
use_jina=use_jina,
|
| 857 |
+
jina_api_key=jina_api_key,
|
| 858 |
+
# snippets=url_snippets # Do not pass snippets when updating url_cache directly
|
| 859 |
+
)
|
| 860 |
+
print(f"Fetched {len(fetched_contents)} URLs successfully.")
|
| 861 |
+
except Exception as e:
|
| 862 |
+
print(f"Error during batch URL fetching: {e}")
|
| 863 |
+
fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
|
| 864 |
+
# Update cache with fetched contents
|
| 865 |
+
for url, content in fetched_contents.items(): # 将获取的内容添加到 url_cache 中
|
| 866 |
+
url_cache[url] = content
|
| 867 |
+
|
| 868 |
+
# After fetching, prepare formatted documents for batch processing
|
| 869 |
+
for relevant_info in batch_relevant_info:
|
| 870 |
+
formatted_documents = "" # 初始化一个空字符串 formatted_documents,用于拼接本次要处理的所有网页信息。后面会将其添加到 batch_documents 列表中
|
| 871 |
+
|
| 872 |
+
doc_str_list = []
|
| 873 |
+
for i, doc_info in enumerate(relevant_info):
|
| 874 |
+
url = doc_info['url']
|
| 875 |
+
raw_context = url_cache.get(url, "") # 获取 url 对应的内容
|
| 876 |
+
doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
|
| 877 |
+
success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
|
| 878 |
+
if success:
|
| 879 |
+
print("extract_snippet_with_context")
|
| 880 |
+
context = filtered_context
|
| 881 |
+
else: # 否则就取 raw_context 的前 max_doc_len * 2 个字符,作为一个有限的片段,避免过长导致后续处理负担
|
| 882 |
+
print(f"use raw_context, {len(raw_context)}")
|
| 883 |
+
context = raw_context[:max_doc_len*2]
|
| 884 |
+
|
| 885 |
+
doc_info['context'] = context
|
| 886 |
+
# formatted_documents += f"**Web Page {i + 1}:**\n"
|
| 887 |
+
# formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
|
| 888 |
+
single_doc = json.dumps(doc_info, ensure_ascii=False, indent=2)
|
| 889 |
+
doc_str_list.append(single_doc)
|
| 890 |
+
batch_documents.append(doc_str_list) # 将本组搜索结果的所有信息(拼接成的字符串 formatted_documents)添加到 batch_documents 列表中
|
| 891 |
+
|
| 892 |
+
# After fetching, prepare for batch processing if there are any
|
| 893 |
+
if batch_sequences:
|
| 894 |
+
print(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
|
| 895 |
+
webpage_analyses = generate_webpage_to_reasonchain_batch( # 根据前面处理好的结果,生成新的推理
|
| 896 |
+
original_questions=batch_original_questions,
|
| 897 |
+
prev_reasonings=batch_prev_reasonings,
|
| 898 |
+
search_queries=batch_search_queries,
|
| 899 |
+
documents=batch_documents,
|
| 900 |
+
dataset_name=dataset_name,
|
| 901 |
+
batch_output_records=batch_output_records, # Pass the collection list
|
| 902 |
+
max_tokens=max_tokens,
|
| 903 |
+
)
|
| 904 |
+
print("Batch generation completed, assigning outputs to sequences...")
|
| 905 |
+
|
| 906 |
+
for seq, analysis,doc in zip(batch_sequences, webpage_analyses, batch_documents): # 遍历批处理返回的 webpage_analyses,将处理结果与相应的序列 seq 进行一一对应
|
| 907 |
+
if isinstance(analysis, str): # 判断 analysis 是否是纯字符串。如果是字符串,说明直接可以追加到序列的文本中
|
| 908 |
+
append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n" # 封装处理结果,添加到序列的历史记录、提示和输出中
|
| 909 |
+
seq['prompt'] += append_text
|
| 910 |
+
seq['output'] += append_text
|
| 911 |
+
seq['history'].append(append_text) # 存的是每一次的webpage_analyses
|
| 912 |
+
seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
|
| 913 |
+
else: # 如果 analysis 不是纯字符串,那么可能是一种特殊的结构,比如表示需要替换推理步骤的 dict 或其他格式
|
| 914 |
+
append_text = replace_recent_steps(seq['output'], analysis)
|
| 915 |
+
seq['prompt'] += append_text
|
| 916 |
+
seq['output'] += append_text
|
| 917 |
+
seq['history'].append(append_text)
|
| 918 |
+
seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
|
| 919 |
+
|
| 920 |
+
# Check if all sequences are finished
|
| 921 |
+
# 保存active_sequences
|
| 922 |
+
active_sequences_part = [{ # 记录每个问题的搜索历史
|
| 923 |
+
'item': ele["item"],
|
| 924 |
+
'prompt': ele['prompt'],
|
| 925 |
+
'output': ele["output"],
|
| 926 |
+
'finished': ele["finished"], # 一开始均为未完成
|
| 927 |
+
'history':ele["history"],
|
| 928 |
+
'search_count': ele["search_count"],
|
| 929 |
+
'all_info': ele['all_info']
|
| 930 |
+
} for ele in active_sequences]
|
| 931 |
+
with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
|
| 932 |
+
json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
|
| 933 |
+
unfinished = [seq for seq in active_sequences if not seq['finished']] # 是否结束是基于模型是否生成了新的搜索
|
| 934 |
+
if not unfinished:
|
| 935 |
+
break
|
| 936 |
+
else:
|
| 937 |
+
if turn >= MAX_TURN:
|
| 938 |
+
print(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
|
| 939 |
+
break
|
| 940 |
+
|
| 941 |
+
total_time = time.time() - start_time
|
| 942 |
+
print(f"Total time taken: {total_time} seconds")
|
| 943 |
+
|
| 944 |
+
# ---------------------- Save Batch Output Records to JSON File ----------------------
|
| 945 |
+
# Define output JSON file path
|
| 946 |
+
t = time.localtime()
|
| 947 |
+
batch_output_file = os.path.join(output_dir, f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')
|
| 948 |
+
|
| 949 |
+
# Save batch_output_records to JSON file
|
| 950 |
+
with open(batch_output_file, 'w', encoding='utf-8') as f: # 这里存的是webpage推理时的输入和输出和提取后的信息
|
| 951 |
+
json.dump(batch_output_records, f, ensure_ascii=False, indent=2)
|
| 952 |
+
|
| 953 |
+
print(f"Batch outputs saved to {batch_output_file}")
|
| 954 |
+
|
| 955 |
+
# Prepare output list for evaluation
|
| 956 |
+
output_list = [seq['output'] for seq in active_sequences]
|
| 957 |
+
|
| 958 |
+
# Run evaluation
|
| 959 |
+
if dataset_name == "eval":
|
| 960 |
+
run_evaluation_for_eval(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)
|
| 961 |
+
else:
|
| 962 |
+
run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)
|
| 963 |
+
|
| 964 |
+
# 评测has answer信息
|
| 965 |
+
turn_files = os.listdir(output_dir)
|
| 966 |
+
turn_files = [file for file in turn_files if file.startswith("turn_")]
|
| 967 |
+
max_turn_file = max(turn_files, key=lambda x: int(re.search(r'turn_(\d+)', x).group(1)))
|
| 968 |
+
|
| 969 |
+
max_turn_file_path = os.path.join(output_dir, max_turn_file)
|
| 970 |
+
print(f"max_turn_file_path: {max_turn_file_path}")
|
| 971 |
+
add_eval(model_path, max_turn_file_path)
|
| 972 |
+
|
| 973 |
+
# ---------------------- Update Search and URL Cache ----------------------
|
| 974 |
+
print('Updating Search and URL Cache...')
|
| 975 |
+
# Load existing caches or initialize empty dictionaries
|
| 976 |
+
if os.path.exists(search_cache_path):
|
| 977 |
+
with open(search_cache_path, 'r', encoding='utf-8') as f:
|
| 978 |
+
search_cache_new = json.load(f)
|
| 979 |
+
else:
|
| 980 |
+
search_cache_new = {}
|
| 981 |
+
|
| 982 |
+
if os.path.exists(url_cache_path):
|
| 983 |
+
with open(url_cache_path, 'r', encoding='utf-8') as f:
|
| 984 |
+
url_cache_new = json.load(f)
|
| 985 |
+
else:
|
| 986 |
+
url_cache_new = {}
|
| 987 |
+
|
| 988 |
+
search_cache.update(search_cache_new)
|
| 989 |
+
url_cache.update(url_cache_new)
|
| 990 |
+
|
| 991 |
+
save_caches()
|
| 992 |
+
|
| 993 |
+
print("Process completed.")
|
| 994 |
+
|
| 995 |
+
if __name__ == "__main__":
|
| 996 |
+
main()
|
| 997 |
+
|
deep_search/search_o1/scripts/vllm_serve_test.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
# vLLM exposes an OpenAI-compatible endpoint; the API key is unused by vLLM
# and can be any placeholder string.

openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"
client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)


# Smoke test: send a single raw completion request to the locally served
# model and print the generated text.
completion = client.completions.create(
    model="/capacity/userdata/models/QwQ-32B-Preview",
    max_tokens=20480,
    temperature=0.7,
    top_p=0.8,
    prompt="nihao",
)
print("Completion result:", completion.choices[0].text)
|
deep_search/sft.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import copy
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import Optional, Dict, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch.utils.data import random_split
|
| 11 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 12 |
+
import transformers
|
| 13 |
+
from torch.utils.data import Dataset
|
| 14 |
+
from transformers import Trainer
|
| 15 |
+
import random
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
from transformers import AutoModelForCausalLM, TrainingArguments
|
| 18 |
+
from datasets import load_dataset
|
| 19 |
+
from transformers import DataCollatorForSeq2Seq
|
| 20 |
+
import shutil
|
| 21 |
+
|
| 22 |
+
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import matplotlib.pyplot as plt
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
class ModelArguments:
    """CLI arguments selecting the model and tokenizer checkpoints."""

    # HF hub id or local path of the base model to fine-tune.
    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
    # flash_attention: Optional[bool] = field(default=False)
    # Tokenizer path; when None, train() falls back to model_name_or_path.
    tokenizer_name_or_path: Optional[str] = field(default=None)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
class DataArguments:
    """CLI arguments describing the training data."""

    # Path to the training data file (loaded via datasets.load_dataset('json', ...)).
    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    # NOTE(review): prompt_type and dailog_augmentation are not read anywhere
    # in this file — presumably consumed elsewhere; confirm before removing.
    # The "dailog" spelling is part of the public CLI flag name, so it is
    # deliberately left untouched here.
    prompt_type: Optional[str] = field(default="instruction")
    dailog_augmentation: Optional[bool] = field(default=False)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """Extends transformers.TrainingArguments with a few extra knobs."""

    # Directory for caching downloaded checkpoints.
    cache_dir: Optional[str] = field(default=None)
    # Optimizer name understood by transformers.TrainingArguments.
    optim: str = field(default="adamw_torch")
    # Passed to AutoTokenizer.from_pretrained as model_max_length in train().
    model_max_length: int = field(
        default=512,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Label value ignored by the cross-entropy loss (HF convention).
IGNORE_INDEX = -100
# NOTE(review): MAX_LENGTH is referenced only by commented-out truncation
# logic; the effective length filter lives in get_dataset — confirm before use.
MAX_LENGTH = 2000

def process(sample, tokenizer):
    """Tokenize one SFT sample into input_ids / attention_mask / labels.

    Builds inputs of the form `<prompt> <segments> <eos>`. The prompt part
    (sample["input"], wrapped in the chat template) is masked with
    IGNORE_INDEX in the labels. Each entry of sample["output"] maps a segment
    kind to text: "gen" segments are supervised (labels = token ids) while
    "doc_gen" segments stay in the input but are masked in the labels.
    None-valued segments are skipped. EOS is appended to both input and labels.
    """
    chat_prompt = tokenizer.apply_chat_template(
        [
            {'role': 'user', 'content': sample["input"]}
        ],
        tokenize=False, add_generation_prompt=True
    )
    token_ids = tokenizer(chat_prompt, add_special_tokens=False)["input_ids"]
    # The whole prompt is masked out of the loss.
    label_ids = [IGNORE_INDEX] * len(token_ids)

    for segment in sample["output"]:
        for seg_kind, seg_text in segment.items():
            if seg_text is None:
                continue
            seg_ids = tokenizer(seg_text, add_special_tokens=False)["input_ids"]
            if seg_kind in ["gen"]:
                # Model-generated text: supervise these positions.
                token_ids += seg_ids
                label_ids += seg_ids
            elif seg_kind in ["doc_gen"]:
                # Retrieved-document text: visible as input, masked in labels.
                token_ids += seg_ids
                label_ids += [IGNORE_INDEX] * len(seg_ids)

    token_ids.append(tokenizer.eos_token_id)
    label_ids.append(tokenizer.eos_token_id)

    return {
        "input_ids": token_ids,
        "attention_mask": [1] * len(token_ids),
        "labels": label_ids,
    }
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def print_function(example, tokenizer):
    """Pretty-print one tokenized example (ids and decoded text) for inspection.

    The "labels" view decodes only supervised positions, i.e. drops every
    IGNORE_INDEX entry before decoding.
    """
    decoded_input = tokenizer.decode(example["input_ids"], skip_special_tokens=False)
    supervised_ids = [tok for tok in example["labels"] if tok != IGNORE_INDEX]
    decoded_labels = tokenizer.decode(supervised_ids, skip_special_tokens=False)

    print("input_ids:\n{}".format(example["input_ids"]))
    print("inputs:\n{}".format(decoded_input))
    print("label_ids:\n{}".format(example["labels"]))
    print("labels:\n{}".format(decoded_labels))
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def get_dataset(file_path, tokenizer, max_len=8000):
    """Load a JSON dataset, tokenize it with `process`, and drop over-long samples.

    Parameters:
    - file_path: path to a JSON/JSONL file consumable by datasets.load_dataset.
    - tokenizer: tokenizer forwarded to `process`.
    - max_len: samples whose tokenized length exceeds this are filtered out
      (prevents out-of-memory during training). Defaults to the previous
      hard-coded limit of 8000, so existing callers are unchanged.

    Returns:
    - list of tokenized samples (dicts with input_ids/attention_mask/labels).
    """
    dataset = load_dataset('json', data_files=file_path)
    train_dataset = dataset["train"]
    # Tokenize every sample; caching is disabled so edits to `process` always
    # take effect.
    tokenized_dataset = train_dataset.map(process, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, load_from_cache_file=False)
    # Show the first example so the prompt/label masking can be eyeballed.
    print_function(next(iter(tokenized_dataset)), tokenizer)
    print(f"len of dataset before filter: {len(tokenized_dataset)}")

    # Drop over-long samples to avoid exhausting GPU memory.
    filtered_dataset = [item for item in tokenized_dataset if len(item["input_ids"]) <= max_len]
    print(f"len of dataset after filter: {len(filtered_dataset)}")
    return filtered_dataset
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def train():
    """Entry point: parse CLI args, build model/tokenizer/dataset, run SFT.

    Fixes a bug in the original: `use_cache` was assigned only when
    `gradient_checkpointing` was enabled, so a run without gradient
    checkpointing crashed with NameError at model construction.
    """
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments)
    )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    print("==========Model Args=========")
    print(model_args)
    print("==========Data Args=========")
    print(data_args)
    print("==========Training Args=========")
    print(training_args)

    # use_cache and gradient_checkpointing cannot both be enabled at once;
    # derive use_cache from the checkpointing flag instead of leaving it
    # undefined when checkpointing is off.
    use_cache = not training_args.gradient_checkpointing
    model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        _attn_implementation="flash_attention_2",
        use_cache=use_cache,
    ).float()

    if model_args.tokenizer_name_or_path is None:
        model_args.tokenizer_name_or_path = model_args.model_name_or_path
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.tokenizer_name_or_path, model_max_length=training_args.model_max_length
    )

    # Some base models ship without a pad token; reuse EOS for padding.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    dataset = get_dataset(data_args.data_path, tokenizer)

    # Dynamic padding; labels are padded with -100 so padded positions are
    # ignored by the loss.
    data_collator = DataCollatorForSeq2Seq(
        tokenizer=tokenizer,
        label_pad_token_id=-100,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        train_dataset=dataset,
    )
    trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
    trainer.save_model(training_args.output_dir)
    trainer.save_state()
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
if __name__ == "__main__":
    torch.manual_seed(42)  # fix the RNG seed for reproducible training runs
    train()
|