SunSec committed on
Commit 361e56c · verified · 1 parent: 7c664ea

Add files using upload-large-folder tool

Files changed (50)
  1. OpenManus/.github/ISSUE_TEMPLATE/config.yaml +4 -0
  2. OpenManus/.github/ISSUE_TEMPLATE/show_me_the_bug.md +25 -0
  3. OpenManus/.github/workflows/build-package.yaml +33 -0
  4. OpenManus/.github/workflows/pre-commit.yaml +26 -0
  5. OpenManus/.github/workflows/stale.yaml +23 -0
  6. deep_search/data_syn/bing_search.py +421 -0
  7. deep_search/data_syn/data_keypoints_stats.py +50 -0
  8. deep_search/data_syn/data_stats.py +250 -0
  9. deep_search/data_syn/data_tag.py +186 -0
  10. deep_search/data_syn/data_tag.sh +113 -0
  11. deep_search/data_syn/data_tag_domain_keypoints.py +118 -0
  12. deep_search/data_syn/extract_domain_keypoints.py +87 -0
  13. deep_search/data_syn/graph_selection.py +91 -0
  14. deep_search/data_syn/json_to_text.py +48 -0
  15. deep_search/data_syn/keywords_count.py +60 -0
  16. deep_search/data_syn/merge.py +28 -0
  17. deep_search/data_syn/merge_two_data.py +33 -0
  18. deep_search/data_syn/search_o1_data_syn.py +937 -0
  19. deep_search/data_syn/search_o1_data_syn_sum_single_page.py +953 -0
  20. deep_search/data_syn/select_data.py +233 -0
  21. deep_search/data_syn/select_remain_data.py +58 -0
  22. deep_search/data_syn/source_stats.py +57 -0
  23. deep_search/data_syn/test.py +69 -0
  24. deep_search/data_syn/testttt.py +2 -0
  25. deep_search/search_o1/google_search.py +246 -0
  26. deep_search/search_o1/infer_wo_search.sh +59 -0
  27. deep_search/search_o1/llm_as_judge_source.sh +67 -0
  28. deep_search/search_o1/process_analyses_data.py +65 -0
  29. deep_search/search_o1/reason_two_model_1.sh +60 -0
  30. deep_search/search_o1/requirements.txt +7 -0
  31. deep_search/search_o1/run_eval_1.sh +91 -0
  32. deep_search/search_o1/run_eval_2.sh +52 -0
  33. deep_search/search_o1/run_eval_3.sh +220 -0
  34. deep_search/search_o1/run_eval_benchmark.sh +691 -0
  35. deep_search/search_o1/run_eval_benchmark_date.sh +62 -0
  36. deep_search/search_o1/run_eval_benchmark_musique_syn.sh +108 -0
  37. deep_search/search_o1/run_eval_benchmark_realqa.sh +129 -0
  38. deep_search/search_o1/run_eval_benchmark_rl.sh +14 -0
  39. deep_search/search_o1/run_eval_sft_1.sh +89 -0
  40. deep_search/search_o1/run_eval_sft_2.sh +106 -0
  41. deep_search/search_o1/run_eval_sft_3.sh +30 -0
  42. deep_search/search_o1/run_eval_sft_sh.sh +5 -0
  43. deep_search/search_o1/run_eval_sft_train.sh +13 -0
  44. deep_search/search_o1/run_gen_data_1.sh +36 -0
  45. deep_search/search_o1/run_gen_data_for_simple_qa.sh +379 -0
  46. deep_search/search_o1/run_gen_data_for_simple_qa_ex_urls.sh +36 -0
  47. deep_search/search_o1/run_gen_data_mix.sh +246 -0
  48. deep_search/search_o1/run_gen_data_mix_last_8000.sh +260 -0
  49. deep_search/search_o1/run_gen_data_selected_remove_2k.sh +201 -0
  50. deep_search/search_o1/search.json +172 -0
OpenManus/.github/ISSUE_TEMPLATE/config.yaml ADDED
@@ -0,0 +1,4 @@
+ blank_issues_enabled: false
+ contact_links:
+   - name: "📑 Read online docs"
+     about: Find tutorials, use cases, and guides in the OpenManus documentation.
OpenManus/.github/ISSUE_TEMPLATE/show_me_the_bug.md ADDED
@@ -0,0 +1,25 @@
+ ---
+ name: "🪲 Show me the Bug"
+ about: Report a bug encountered while using OpenManus and seek assistance.
+ title: ''
+ labels: kind/bug
+ assignees: ''
+ ---
+
+ **Bug description**
+ <!-- Clearly describe the bug you encountered -->
+
+ **Bug solved method**
+ <!-- If resolved, explain the solution. Optionally, include a Pull Request URL. -->
+ <!-- If unresolved, provide additional details to aid investigation -->
+
+ **Environment information**
+ <!-- System: e.g., Ubuntu 22.04, Python: e.g., 3.12, OpenManus version: e.g., 0.1.0 -->
+
+ - System version:
+ - Python version:
+ - OpenManus version or branch:
+ - Installation method (e.g., `pip install -r requirements.txt` or `pip install -e .`):
+
+ **Screenshots or logs**
+ <!-- Attach screenshots or logs to help diagnose the issue -->
OpenManus/.github/workflows/build-package.yaml ADDED
@@ -0,0 +1,33 @@
+ name: Build and upload Python package
+
+ on:
+   workflow_dispatch:
+   release:
+     types: [created, published]
+
+ jobs:
+   deploy:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - name: Set up Python
+         uses: actions/setup-python@v5
+         with:
+           python-version: '3.12'
+           cache: 'pip'
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install -r requirements.txt
+           pip install setuptools wheel twine
+       - name: Set package version
+         run: |
+           export VERSION="${GITHUB_REF#refs/tags/v}"
+           sed -i "s/version=.*/version=\"${VERSION}\",/" setup.py
+       - name: Build and publish
+         env:
+           TWINE_USERNAME: __token__
+           TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
+         run: |
+           python setup.py bdist_wheel sdist
+           twine upload dist/*
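
For clarity, the tag-to-version derivation in the "Set package version" step can be mirrored in Python (illustrative only; the workflow does this in shell, and the tag value below is hypothetical):

    # What ${GITHUB_REF#refs/tags/v} computes, written out in Python
    ref = "refs/tags/v0.1.0"                   # hypothetical GITHUB_REF for a release tag
    version = ref.removeprefix("refs/tags/v")  # -> "0.1.0", written into setup.py by sed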
OpenManus/.github/workflows/pre-commit.yaml ADDED
@@ -0,0 +1,26 @@
+ name: Pre-commit checks
+
+ on:
+   pull_request:
+     branches:
+       - '**'
+   push:
+     branches:
+       - '**'
+
+ jobs:
+   pre-commit-check:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout Source Code
+         uses: actions/checkout@v4
+       - name: Set up Python 3.12
+         uses: actions/setup-python@v5
+         with:
+           python-version: '3.12'
+       - name: Install pre-commit and tools
+         run: |
+           python -m pip install --upgrade pip
+           pip install pre-commit black==23.1.0 isort==5.12.0 autoflake==2.0.1
+       - name: Run pre-commit hooks
+         run: pre-commit run --all-files
OpenManus/.github/workflows/stale.yaml ADDED
@@ -0,0 +1,23 @@
+ name: Close inactive issues
+
+ on:
+   schedule:
+     - cron: "5 0 * * *"
+
+ jobs:
+   close-issues:
+     runs-on: ubuntu-latest
+     permissions:
+       issues: write
+       pull-requests: write
+     steps:
+       - uses: actions/stale@v5
+         with:
+           days-before-issue-stale: 30
+           days-before-issue-close: 14
+           stale-issue-label: "inactive"
+           stale-issue-message: "This issue has been inactive for 30 days. Please comment if you have updates."
+           close-issue-message: "This issue was closed after 44 days of inactivity (30 days to go stale plus 14 more without updates). Reopen if still relevant."
+           days-before-pr-stale: -1
+           days-before-pr-close: -1
+           repo-token: ${{ secrets.GITHUB_TOKEN }}
deep_search/data_syn/bing_search.py ADDED
@@ -0,0 +1,421 @@
+ import os
+ import json
+ import requests
+ from requests.exceptions import Timeout
+ from bs4 import BeautifulSoup
+ from tqdm import tqdm
+ import time
+ import concurrent
+ from concurrent.futures import ThreadPoolExecutor
+ import pdfplumber
+ from io import BytesIO
+ import re
+ import string
+ from typing import Optional, Tuple
+ from nltk.tokenize import sent_tokenize
+
+ # os.environ['http_proxy'] = 'http://127.0.0.1:7890'
+ # os.environ['https_proxy'] = 'http://127.0.0.1:7890'
+
+
+ # ----------------------- Custom Headers -----------------------
+ headers = {
+     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
+                   'AppleWebKit/537.36 (KHTML, like Gecko) '
+                   'Chrome/58.0.3029.110 Safari/537.36',
+     'Referer': 'https://www.google.com/',
+     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
+     'Accept-Language': 'en-US,en;q=0.5',
+     'Connection': 'keep-alive',
+     'Upgrade-Insecure-Requests': '1'
+ }
+
+ # Initialize session
+ session = requests.Session()
+ session.headers.update(headers)
+
+
+ def remove_punctuation(text: str) -> str:
+     """Remove punctuation from the text."""
+     return text.translate(str.maketrans("", "", string.punctuation))
+
+ def f1_score(true_set: set, pred_set: set) -> float:
+     """Calculate the F1 score between two sets of words."""
+     intersection = len(true_set.intersection(pred_set))
+     if not intersection:
+         return 0.0
+     precision = intersection / float(len(pred_set))
+     recall = intersection / float(len(true_set))
+     return 2 * (precision * recall) / (precision + recall)
+
+ def extract_snippet_with_context(full_text: str, snippet: str, context_chars: int = 2500) -> Tuple[bool, str]:
+     """
+     Extract the sentence that best matches the snippet, plus surrounding context, from the full text.
+
+     The function preprocesses the text, scores each sentence against the snippet
+     with a word-level F1 score, and returns the best-matching sentence together
+     with a window of context characters around it.
+
+     Args:
+         full_text (str): The full text extracted from the webpage.
+         snippet (str): The snippet to match.
+         context_chars (int): Number of characters to include before and after the snippet.
+
+     Returns:
+         Tuple[bool, str]: The first element indicates whether extraction was successful; the second is the extracted context.
+     """
+     try:
+         full_text = full_text[:50000]
+
+         snippet = snippet.lower()
+         snippet = remove_punctuation(snippet)
+         snippet_words = set(snippet.split())
+
+         best_sentence = None
+         best_f1 = 0.2
+
+         # sentences = re.split(r'(?<=[.!?]) +', full_text)  # Split sentences using regex, supporting ., !, ? endings
+         sentences = sent_tokenize(full_text)  # Split sentences using nltk's sent_tokenize
+
+         for sentence in sentences:
+             key_sentence = sentence.lower()
+             key_sentence = remove_punctuation(key_sentence)
+             sentence_words = set(key_sentence.split())
+             f1 = f1_score(snippet_words, sentence_words)
+             if f1 > best_f1:
+                 best_f1 = f1
+                 best_sentence = sentence
+
+         if best_sentence:
+             para_start = full_text.find(best_sentence)
+             para_end = para_start + len(best_sentence)
+             start_index = max(0, para_start - context_chars)
+             end_index = min(len(full_text), para_end + context_chars)
+             context = full_text[start_index:end_index]
+             return True, context
+         else:
+             # If no matching sentence is found, return the first context_chars*2 characters of the full text
+             return False, full_text[:context_chars * 2]
+     except Exception as e:
+         return False, f"Failed to extract snippet context due to {str(e)}"
+
+ def extract_text_from_url(url, use_jina=False, jina_api_key=None, snippet: Optional[str] = None):
+     """
+     Extract text from a URL. If a snippet is provided, extract the context related to it.
+
+     Args:
+         url (str): URL of a webpage or PDF.
+         use_jina (bool): Whether to use Jina for extraction.
+         jina_api_key (Optional[str]): API key for the Jina reader endpoint.
+         snippet (Optional[str]): The snippet to search for.
+
+     Returns:
+         str: Extracted text or context.
+     """
+     try:
+         print(f"extract_text_from_url use_jina: {use_jina}")
+         if use_jina:
+             jina_headers = {
+                 'Authorization': f'Bearer {jina_api_key}',
+                 'X-Return-Format': 'markdown',
+                 # 'X-With-Links-Summary': 'true'
+             }
+             response = requests.get(f'https://r.jina.ai/{url}', headers=jina_headers).text
+             # Remove URLs and collapse decoration characters
+             pattern = r"\(https?:.*?\)|\[https?:.*?\]"
+             text = re.sub(pattern, "", response).replace('---', '-').replace('===', '=').replace('   ', ' ').replace('  ', ' ')
+             print("use jina to extract text successfully")
+         else:
+             print("don't use jina to extract text")
+             response = session.get(url, timeout=20)  # Set timeout to 20 seconds
+             response.raise_for_status()  # Raise HTTPError if the request failed
+             # Determine the content type
+             content_type = response.headers.get('Content-Type', '')
+             if 'pdf' in content_type:
+                 # If it's a PDF file, extract PDF text
+                 print("Extracting text from PDF...")
+                 return extract_pdf_text(url)
+             # Try using the lxml parser, fall back to html.parser if unavailable
+             try:
+                 soup = BeautifulSoup(response.text, 'lxml')
+             except Exception:
+                 print("lxml parser not found or failed, falling back to html.parser")
+                 soup = BeautifulSoup(response.text, 'html.parser')
+             text = soup.get_text(separator=' ', strip=True)
+
+         if snippet:
+             success, context = extract_snippet_with_context(text, snippet)
+             if success:
+                 return context
+             else:
+                 return text
+         else:
+             # If no snippet is provided, return directly
+             return text[:8000]
+     except requests.exceptions.HTTPError as http_err:
+         return f"HTTP error occurred: {http_err}"
+     except requests.exceptions.ConnectionError:
+         return "Error: Connection error occurred"
+     except requests.exceptions.Timeout:
+         return "Error: Request timed out after 20 seconds"
+     except Exception as e:
+         return f"Unexpected error: {str(e)}"
+
+ def fetch_page_content(urls, max_workers=4, use_jina=False, jina_api_key=None, snippets: Optional[dict] = None):
+     """
+     Concurrently fetch content from multiple URLs using a ThreadPoolExecutor.
+
+     Args:
+         urls (list): List of URLs to scrape.
+         max_workers (int): Maximum number of concurrent threads.
+         use_jina (bool): Whether to use Jina for extraction.
+         jina_api_key (Optional[str]): API key for the Jina reader endpoint.
+         snippets (Optional[dict]): A dictionary mapping URLs to their respective snippets.
+
+     Returns:
+         dict: A dictionary mapping URLs to the extracted content or context.
+     """
+     results = {}
+     with ThreadPoolExecutor(max_workers=max_workers) as executor:
+         # Use tqdm to display a progress bar
+         futures = {
+             executor.submit(extract_text_from_url, url, use_jina, jina_api_key, snippets.get(url) if snippets else None): url
+             for url in urls
+         }
+         for future in tqdm(concurrent.futures.as_completed(futures), desc="Fetching URLs", total=len(urls)):
+             url = futures[future]
+             try:
+                 data = future.result()
+                 results[url] = data
+             except Exception as exc:
+                 results[url] = f"Error fetching {url}: {exc}"
+             time.sleep(0.2)  # Simple rate limiting
+     return results
+
+
+ # Earlier implementation against the official Bing Web Search API, kept for reference:
+ # def bing_web_search(query, subscription_key, endpoint, market='en-US', language='en', timeout=20):
+ #     """
+ #     Perform a search using the Bing Web Search API with a set timeout.
+ #
+ #     Args:
+ #         query (str): Search query.
+ #         subscription_key (str): Subscription key for the Bing Search API.
+ #         endpoint (str): Endpoint for the Bing Search API.
+ #         market (str): Market, e.g., "en-US" or "zh-CN".
+ #         language (str): Language of the results, e.g., "en".
+ #         timeout (int or float or tuple): Request timeout in seconds.
+ #             Can be a float representing the total timeout,
+ #             or a tuple (connect timeout, read timeout).
+ #
+ #     Returns:
+ #         dict: JSON response of the search results, or an empty dict on timeout or error.
+ #     """
+ #     headers = {
+ #         "Ocp-Apim-Subscription-Key": subscription_key
+ #     }
+ #     params = {
+ #         "q": query,
+ #         "mkt": market,
+ #         "setLang": language,
+ #         "textDecorations": True,
+ #         "textFormat": "HTML"
+ #     }
+ #
+ #     try:
+ #         response = requests.get(endpoint, headers=headers, params=params, timeout=timeout)
+ #         response.raise_for_status()  # Raises requests.exceptions.HTTPError on 4xx/5xx status codes
+ #         search_results = response.json()  # Dict-like object holding the Bing Web Search results
+ #         return search_results
+ #     except Timeout:
+ #         print(f"Bing Web Search request timed out ({timeout} seconds) for query: {query}")
+ #         return {}  # Or you can choose to raise an exception
+ #     except requests.exceptions.RequestException as e:
+ #         print(f"Error occurred during Bing Web Search request: {e}")
+ #         return {}
+
+ proxies = {
+     "http": "http://127.0.0.1:7880",
+     "https": "http://127.0.0.1:7880"
+ }
+
+
+ def bing_web_search(query, subscription_key, endpoint, market='en-US', language='en', timeout=2000):
+     """
+     Perform a search by POSTing the query to a Serper-style search endpoint, with a set timeout.
+
+     Args:
+         query (str): Search query.
+         subscription_key (str): API key for the search endpoint.
+         endpoint (str): URL of the search endpoint.
+         market (str): Market, e.g., "en-US" or "zh-CN".
+         language (str): Language of the results, e.g., "en".
+         timeout (int or float or tuple): Request timeout in seconds.
+             Can be a float representing the total timeout,
+             or a tuple (connect timeout, read timeout).
+
+     Returns:
+         dict: JSON response of the search results. Retries on timeout or request
+         errors, and gives up (returning None) after 20 failed attempts.
+     """
+     payload = json.dumps({
+         "q": query,               # query string
+         "mkt": market,            # market
+         "setLang": language,      # result language
+         "textDecorations": True,  # enable text decorations
+         "textFormat": "HTML"      # text format
+     })
+
+     headers = {
+         'X-API-KEY': subscription_key,
+         'Content-Type': 'application/json'
+     }
+     error_cnt = 0
+     while True:
+         if error_cnt == 20:
+             print(f"query: {query} has been tried {error_cnt} times without success; skipping it.")
+             break
+         try:
+             # Send the POST request
+             response = requests.request("POST", endpoint, headers=headers, data=payload, proxies=proxies, timeout=timeout)
+             response.raise_for_status()  # Raises requests.exceptions.HTTPError on 4xx/5xx status codes
+             search_results = response.json()
+             return search_results
+         except Timeout:
+             error_cnt += 1
+             print(f"error_cnt: {error_cnt}, Bing Web Search request timed out ({timeout} seconds) for query: {query}")
+             time.sleep(5)
+         except requests.exceptions.RequestException as e:
+             error_cnt += 1
+             print(f"error_cnt: {error_cnt}, Error occurred during Bing Web Search request: {e}, payload: {payload}")
+             time.sleep(5)
+
+
+ def extract_pdf_text(url):
+     """
+     Extract text from a PDF.
+
+     Args:
+         url (str): URL of the PDF file.
+
+     Returns:
+         str: Extracted text content or error message.
+     """
+     try:
+         response = session.get(url, timeout=20)  # Set timeout to 20 seconds
+         if response.status_code != 200:
+             return f"Error: Unable to retrieve the PDF (status code {response.status_code})"
+
+         # Open the PDF file using pdfplumber
+         with pdfplumber.open(BytesIO(response.content)) as pdf:
+             full_text = ""
+             for page in pdf.pages:
+                 text = page.extract_text()
+                 if text:
+                     full_text += text
+
+         # Limit the text length
+         cleaned_text = ' '.join(full_text.split()[:600])
+         return cleaned_text
+     except requests.exceptions.Timeout:
+         return "Error: Request timed out after 20 seconds"
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ # Earlier implementation for the Bing response schema ('webPages'/'value'), kept for reference:
+ # def extract_relevant_info(search_results):
+ #     """
+ #     Extract relevant information from Bing search results.
+ #
+ #     Args:
+ #         search_results (dict): JSON response from the Bing Web Search API.
+ #
+ #     Returns:
+ #         list: A list of dictionaries containing the extracted information.
+ #     """
+ #     useful_info = []
+ #
+ #     if 'webPages' in search_results and 'value' in search_results['webPages']:  # 'value' is a list with one entry per result page
+ #         for id, result in enumerate(search_results['webPages']['value']):
+ #             info = {
+ #                 'id': id + 1,  # IDs start at 1 rather than 0, which is more intuitive for later processing
+ #                 'title': result.get('name', ''),
+ #                 'url': result.get('url', ''),
+ #                 'site_name': result.get('siteName', ''),
+ #                 'date': result.get('datePublished', '').split('T')[0],  # publication date of the result
+ #                 'snippet': result.get('snippet', ''),  # short description; may contain HTML tags to clean up later
+ #                 'context': ''  # Reserved field to be filled later
+ #             }
+ #             useful_info.append(info)
+ #
+ #     return useful_info
+
+ def extract_relevant_info(search_results):
+     """
+     Extract relevant information from the search results.
+
+     Args:
+         search_results (dict): JSON response from the search endpoint.
+
+     Returns:
+         list: A list of dictionaries containing the extracted information.
+     """
+     useful_info = []
+
+     if 'organic' in search_results:  # 'organic' is a list with one entry per result page
+         for id, result in enumerate(search_results['organic']):
+             info = {
+                 'id': id + 1,  # IDs start at 1 rather than 0, which is more intuitive for later processing
+                 'title': result.get('title', ''),
+                 'url': result.get('link', ''),
+                 'site_name': result.get('siteName', ''),
+                 'date': result.get('datePublished', '').split('T')[0],  # publication date of the result
+                 'snippet': result.get('snippet', ''),  # short description; may contain HTML tags to clean up later
+                 'context': ''  # Reserved field to be filled later
+             }
+             useful_info.append(info)
+
+     return useful_info
+
+
+ # ------------------------------------------------------------
+
+ if __name__ == "__main__":
+     # Example usage
+     # Define the query to search
+     query = "Structure of dimethyl fumarate"
+
+     # API key for the search service, read from the environment (never hard-code secrets)
+     BING_SUBSCRIPTION_KEY = os.environ.get("BING_SEARCH_V7_SUBSCRIPTION_KEY")
+     if not BING_SUBSCRIPTION_KEY:
+         raise ValueError("Please set the BING_SEARCH_V7_SUBSCRIPTION_KEY environment variable.")
+
+     bing_endpoint = "https://google.serper.dev/search"
+
+     # Perform the search
+     print("Performing Bing Web Search...")
+     search_results = bing_web_search(query, BING_SUBSCRIPTION_KEY, bing_endpoint)
+
+     print("Extracting relevant information from search results...")
+     extracted_info = extract_relevant_info(search_results)
+
+     print("Fetching and extracting context for each snippet...")
+     for info in tqdm(extracted_info, desc="Processing Snippets"):
+         # Jina key read from the environment (JINA_API_KEY is an assumed variable name)
+         full_text = extract_text_from_url(info['url'], use_jina=False, jina_api_key=os.environ.get("JINA_API_KEY"), snippet=info["snippet"])  # Get full webpage text
+         if full_text and not full_text.startswith("Error"):
+             success, context = extract_snippet_with_context(full_text, info['snippet'])
+             if success:
+                 info['context'] = context
+                 print("-------------------")
+                 print(f"Snippet: {info['snippet']}\nContext: {context}")
+             else:
+                 info['context'] = f"Could not extract context. Returning first 8000 chars: {full_text[:8000]}"
+         else:
+             info['context'] = f"Failed to fetch full text: {full_text}"
+
+     # print("Your Search Query:", query)
+     # print("Final extracted information with context:")
+     # print(json.dumps(extracted_info, indent=2, ensure_ascii=False))
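
A minimal end-to-end sketch of how these helpers compose (not part of the commit; the query is illustrative, and the API key is read from the same environment variable the script's own error message names):

    import os
    from bing_search import bing_web_search, extract_relevant_info, fetch_page_content

    api_key = os.environ["BING_SEARCH_V7_SUBSCRIPTION_KEY"]
    endpoint = "https://google.serper.dev/search"  # endpoint used in this module's __main__

    # Search, extract the organic results, then fetch snippet-centered context per URL
    results = bing_web_search("Structure of dimethyl fumarate", api_key, endpoint)
    info = extract_relevant_info(results)
    snippets = {item["url"]: item["snippet"] for item in info}
    pages = fetch_page_content([item["url"] for item in info], max_workers=4, snippets=snippets)
    for item in info:
        item["context"] = pages.get(item["url"], "")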
deep_search/data_syn/data_keypoints_stats.py ADDED
@@ -0,0 +1,50 @@
+ import json
+ from collections import Counter, OrderedDict
+ import matplotlib.pyplot as plt
+
+ # Load the data
+ file_path = '/share/project/sunshuang/deep_search/data_for_rl/musique_tagged/final_selected_dataset.json'
+
+ with open(file_path, 'r') as f:
+     data = json.load(f)
+
+ # Extract key_points and count occurrences of each keyword
+ key_points_counter = Counter()
+
+ for entry in data:
+     domain_keypoints = entry.get('domain_keypoints', {})
+     key_points = domain_keypoints.get('key_points', [])
+     key_points_counter.update(key_points)
+
+ # Sort by count in descending order
+ key_points_counter = OrderedDict(sorted(key_points_counter.items(), key=lambda item: item[1], reverse=True))
+ # Print the statistics
+ # print("Key Points Count:")
+ # for key, count in key_points_counter.items():
+ #     print(f"{key}: {count}")
+
+ # Save the statistics to a JSON file
+ key_points_count_file = 'key_points_count.json'
+ with open(key_points_count_file, 'w') as f:
+     json.dump(dict(key_points_counter), f, indent=4)
+
+ print(f"Key points count saved to '{key_points_count_file}'")
+
+ # Draw the pie chart
+ labels = [f'{key} ({count})' for key, count in key_points_counter.items()]
+ sizes = list(key_points_counter.values())
+
+ plt.figure(figsize=(12, 7))
+ plt.pie(
+     sizes,
+     labels=labels,
+     autopct='%1.1f%%',
+     startangle=140
+ )
+ plt.title('Key Points Distribution')
+ plt.tight_layout()
+ plt.savefig('key_points_distribution_pie.png', bbox_inches='tight')
+ plt.close()
+
+ print("Pie chart saved as 'key_points_distribution_pie.png'")
deep_search/data_syn/data_stats.py ADDED
@@ -0,0 +1,250 @@
+ import json
+ import matplotlib
+ import matplotlib.pyplot as plt
+ from collections import Counter, OrderedDict
+ matplotlib.rcParams['font.family'] = ['Songti SC']  # Songti font for macOS users
+ # Load the data
+ file_path = '/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/merged_4k/4k.json'
+
+ with open(file_path, 'r') as f:
+     data = json.load(f)
+
+ # Extract the required fields
+ domains = []
+ totals = []
+ special_totals = []
+ hops = []
+
+ for entry in data:
+     domain_info = entry.get('domain_keypoints', {})
+     keywords_info = entry.get('keywords_count', {})
+
+     domains.append(domain_info.get('domain', 'Unknown'))
+     totals.append(keywords_info.get('total', 0))
+     special_totals.append(keywords_info.get('special_total', 0))
+     hops.append(entry.get('hop', 0))
+
+ # Count the domain distribution
+ domain_counts = Counter(domains)
+
+ # Sort by value in descending order
+ domain_counts = OrderedDict(sorted(domain_counts.items(), key=lambda item: item[1], reverse=True))
+
+ # Save all domains and their counts to a JSON file
+ domain_count_file = 'domain_counts.json'
+ with open(domain_count_file, 'w') as f:
+     json.dump(domain_counts, f, indent=4)
+
+ # Merge domains with fewer than `threshold` occurrences into "Other"
+ threshold = 1
+ filtered_domains = {}
+ other_count = 0
+
+ for domain, count in domain_counts.items():
+     if count < threshold:
+         other_count += count
+     else:
+         filtered_domains[domain] = count
+
+ if other_count > 0:
+     filtered_domains['Other'] = other_count
+
+ # Draw the pie chart
+ domain_labels = [f'{dom}' for dom, cnt in filtered_domains.items()]  # show only the domain name
+ domain_sizes = list(filtered_domains.values())
+
+ plt.figure(figsize=(12, 7))
+ plt.pie(
+     domain_sizes,
+     labels=None,  # do not draw labels directly on the pie
+     autopct='%1.1f%%',
+     startangle=140
+ )
+
+ # Add a legend
+ plt.legend(
+     labels=[f'{dom} ({cnt})' for dom, cnt in filtered_domains.items()],
+     loc='upper right',
+     bbox_to_anchor=(1.2, 1),  # move the legend to the right side
+     fontsize=10
+ )
+
+ plt.title('Domain Distribution (Count in Parentheses)')
+ plt.tight_layout()  # optimize the layout
+ plt.savefig('domain_distribution_pie.png', bbox_inches='tight', dpi=1000)  # avoid clipping
+ plt.close()
+
+ # Helper: draw a histogram with a value label on each bar
+ def plot_histogram(data, title, xlabel, output_filename):
+     counts = Counter(data)
+     sorted_items = sorted(counts.items())
+     labels, values = zip(*sorted_items)
+
+     plt.figure(figsize=(10, 6))
+     bars = plt.bar(labels, values)
+     plt.title(title)
+     plt.xlabel(xlabel)
+     plt.ylabel('Count')
+
+     for bar in bars:
+         height = bar.get_height()
+         plt.text(bar.get_x() + bar.get_width()/2., height, str(height),
+                  ha='center', va='bottom')
+
+     plt.savefig(output_filename)
+     plt.close()
+
+ # Draw the histograms
+ # plot_histogram(totals, 'Total Distribution', 'Total Value', 'total_histogram.png')
+ plot_histogram(special_totals, 'Special Question Words Distribution', 'Number of question words per question', 'special_total_histogram.png')
+ # plot_histogram(hops, 'Hop Distribution', 'Hop Value', 'hop_histogram.png')
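
A hedged usage example for plot_histogram with synthetic values (the output filename is illustrative):

    plot_histogram(
        [1, 2, 2, 3, 3, 3],
        'Special Question Words Distribution',
        'Number of question words per question',
        'example_histogram.png',
    )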
deep_search/data_syn/data_tag.py ADDED
@@ -0,0 +1,186 @@
+ import os
+ import argparse
+ import json
+ from vllm import LLM, SamplingParams
+ from datasets import Dataset
+ from transformers import AutoTokenizer
+ import random
+ from tqdm import tqdm
+
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
+
+
+ user_prompt = f"""You are an expert in complex problem reasoning. Below are definitions and examples of various reasoning graph shapes. Following these, you will receive a candidate question. Your task is to:
+
+ Analyze the reasoning graph shape of the candidate question, using the provided examples as a guide.
+ Determine the subject area of the question (choose from options such as Music, Sports, Geography, Art, Politics, Science and Technology, Video Games, History, TV Shows, or Other).
+ Identify the key factual elements needed to answer the question.
+
+ ```
+ [Definitions and examples of various reasoning graph shapes]:
+
+ + 1. 2-hop Reasoning (2-step chain)
+ Basic Pattern: A simple linear chain with two consecutive steps, no branching or cycles.
+ Example Question:
+ "Who succeeded the first President of Namibia?"
+ Decomposition:
+ Step 1: Identify the first President of Namibia.
+ Step 2: Determine who succeeded that president.
+
+ + 2. 3-hop Reasoning (3-step chain)
+ Basic Pattern: A linear chain with three consecutive steps, still without any branching.
+ Example Question:
+ "What currency is used where Billy Giles died?"
+ Decomposition:
+ Step 1: Find out where Billy Giles died.
+ Step 2: Determine which country or region that place belongs to.
+ Step 3: Identify the currency used in that country or region.
+
+ + 3. 3-hop Reasoning with Branching (3-step chain + branching)
+ Basic Pattern: Three steps where the first two steps run in parallel (branching), and the final step combines the results.
+ Example Question:
+ "When was the first establishment that McDonaldization is named after, open in the country Horndean is located?"
+ Decomposition:
+ Branch 1: Determine the country where Horndean is located.
+ Branch 2: Identify the establishment that McDonaldization is named after (and find out details such as its first branch).
+ Final Step: Merge the information from both branches to determine when that establishment opened in the identified country.
+
+ + 4. 4-hop Reasoning (4-step chain)
+ Basic Pattern: A linear chain requiring four consecutive steps, with no branching.
+ Example Question:
+ "When did Napoleon occupy the city where the mother of the woman who brought Louis XVI style to the court died?"
+ Decomposition:
+ Step 1: Identify the woman who brought Louis XVI style to the court.
+ Step 2: Determine who her mother was.
+ Step 3: Find out in which city her mother died.
+ Step 4: Ascertain when Napoleon occupied that city.
+
+ + 5. 4-hop Reasoning with Branching (4-step chain + branching)
+ Basic Pattern: A four-step reasoning process that involves parallel queries (branching) before combining the results.
+ Example Question:
+ "How many Germans live in the colonial holding in Aruba's continent that was governed by Prazeres's country?"
+ Decomposition:
+ Branch 1: Identify which continent Aruba is located in.
+ Branch 2: Determine which country Prazeres is from.
+ Step 3: Identify the colonial holding on that continent governed by the country from Branch 2.
+ Step 4: Find out how many Germans live in that colonial holding.
+
+ + 6. 3-hop Reasoning with Branching and Cycle (3-step chain + branching + cycle)
+ Basic Pattern: A three-step process with parallel branches that later require recombination, with a cyclic element where earlier results are referenced again in the final step.
+ Example Question:
+ "When did the people who captured Malakoff come to the region where Philipsburg is located?"
+ Decomposition:
+ Branch 1: Determine the region where Philipsburg is located.
+ Branch 2: Identify who captured Malakoff.
+ Final Step: Combine the results from Branch 1 and Branch 2 (a cyclic reference to the earlier information) to determine when those people arrived in the region.
+
+ + Overall Summary:
+
+ n-hop: Refers to the number of steps or connected information points needed to reach the answer.
+ Branching: Occurs when multiple independent queries (sub-questions) need to be solved in parallel before being combined.
+ Cycle: Involves a loop or a back-reference where later steps depend on revisiting or combining earlier results.
+
+ [Examples of key points]
+ Example 1:
+ Question: "Are director of film Move (1970 Film) and director of film Mediterranee (1963 Film) from the same country?"
+ Key points: director, country.
+
+ Example 2:
+ Question: "Which film whose director is younger, Charge It To Me or Danger: Diabolik?"
+ Key points: director, age.
+
+ Example 3:
+ Question: "Does Mukasa Mbidde have the same nationality as Erich Maas?"
+ Key points: nationality.
+
+ Example 4:
+ Question: "Which film was released more recently, Die schöne Lurette or Sabhash?"
+ Key points: film, release date.
+
+ **Notably, a name is not a key point.**
+ ```
+
+ [Candidate Question]
+ {{question}}
+
+ **Please do not generate any analysis and exactly format your answer as follows:
+
+ Reasoning Graph: [the name of the predicted reasoning graph; do not generate other analysis]
+ Area: [the subject area]
+ Key Points: [the key factual elements]**"""
+
+
+ def main(input_file_path, output_path):
+
+     data = []
+     with open(input_file_path, "r", encoding="utf-8") as file:
+         # for line in file:
+         #     data.append(json.loads(line))
+         data = json.load(file)
+
+     prompts = []
+
+     for item in tqdm(data, desc="gen prompts"):
+         prompt = user_prompt.format(question=item["Question"])
+         prompt = {"role": "user", "content": prompt}
+
+         message = tokenizer.apply_chat_template(
+             [prompt], add_generation_prompt=True, tokenize=False
+         )
+         prompts.append(message)
+         item["tag_prompt"] = message
+     print(prompts[0])
+     outputs = llm.generate(prompts, sampling_params)
+
+     generated_texts = []
+     for i, output in tqdm(enumerate(outputs), desc="process outputs"):
+         generated_text = output.outputs[0].text.strip()
+         data[i]["tag_qwq"] = generated_text
+         generated_texts.append(data[i])
+
+     os.makedirs(os.path.dirname(output_path), exist_ok=True)
+     with open(output_path, "w", encoding="utf-8") as json_file:
+         json.dump(generated_texts, json_file, ensure_ascii=False, indent=4)
+
+     print(f"save to {output_path}")
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Process input file and CUDA devices.")
+     parser.add_argument("--input_file_path", type=str, required=True, help="Path to the input JSON file.")
+     parser.add_argument("--cuda_visible_devices", type=str, required=True, help="CUDA devices to use (e.g., '0,1').")
+     args = parser.parse_args()
+
+     os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_visible_devices
+     input_file_path = args.input_file_path
+
+     model_name = "qwq"
+     model_path = "/share/project/zhipengchen/model/QwQ-32B"
+
+     # input_file_path = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/split_1.json"
+     file_name = os.path.splitext(input_file_path)[0]  # strip only the extension, not everything after the first dot
+     output_path = f"{file_name}_tagged.json"
+
+     tokenizer = AutoTokenizer.from_pretrained(model_path)
+     temperature = 0.3
+     sampling_params = SamplingParams(n=1, temperature=temperature, max_tokens=8000, top_p=0.99, top_k=50)
+     llm = LLM(model=model_path, tensor_parallel_size=2, gpu_memory_utilization=0.95, trust_remote_code=True)
+
+     print(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
+     print(f"model path: {model_path}")
+     print(f"temperature: {temperature}")
+     print(f"input_file_path: {input_file_path}")
+
+     main(input_file_path, output_path)
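
The prompt pins the model to a three-line answer format; a minimal sketch of a downstream parser for it (not part of this commit, and it assumes the model actually followed the format):

    import re

    def parse_tag_output(text: str) -> dict:
        # Pull out the 'Reasoning Graph', 'Area', and 'Key Points' lines the prompt requests
        fields = {}
        for key in ("Reasoning Graph", "Area", "Key Points"):
            m = re.search(rf"{key}:\s*(.+)", text)
            fields[key] = m.group(1).strip() if m else None
        return fields

    example = "Reasoning Graph: 2-hop Reasoning\nArea: Geography\nKey Points: currency, location"
    print(parse_tag_output(example))
    # {'Reasoning Graph': '2-hop Reasoning', 'Area': 'Geography', 'Key Points': 'currency, location'}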
deep_search/data_syn/data_tag.sh ADDED
@@ -0,0 +1,113 @@
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag.py \
+     --input_file_path "data/mixed_data/splits/split_1.json" \
+     --cuda_visible_devices "0,1" > data/mixed_data/logs/split_1.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag.py \
+     --input_file_path "data/mixed_data/splits/split_2.json" \
+     --cuda_visible_devices "2,3" > data/mixed_data/logs/split_2.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag.py \
+     --input_file_path "data/mixed_data/splits/split_3.json" \
+     --cuda_visible_devices "4,5" > data/mixed_data/logs/split_3.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag.py \
+     --input_file_path "data/mixed_data/splits/split_4.json" \
+     --cuda_visible_devices "6,7" > data/mixed_data/logs/split_4.log 2>&1 &
+
+ # worker 3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag.py \
+     --input_file_path "data/mixed_data/splits/split_1.json" \
+     --cuda_visible_devices "0,1" > data/mixed_data/logs/split_1.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag.py \
+     --input_file_path "data/mixed_data/splits/split_2.json" \
+     --cuda_visible_devices "2,3" > data/mixed_data/logs/split_2.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag.py \
+     --input_file_path "data/mixed_data/splits/split_3.json" \
+     --cuda_visible_devices "4,5" > data/mixed_data/logs/split_3.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag.py \
+     --input_file_path "data/mixed_data/splits/split_4.json" \
+     --cuda_visible_devices "6,7" > data/mixed_data/logs/split_4.log 2>&1 &
+
+ ############### tag domain and key points ###############
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_4.json" \
+     --cuda_visible_devices "6,7" > data/mixed_data/logs/tag_domain_keypoints_split_4.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_1.json" \
+     --cuda_visible_devices "0,1" > data/mixed_data/logs/tag_domain_keypoints_split_1.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_2.json" \
+     --cuda_visible_devices "2,3" > data/mixed_data/logs/tag_domain_keypoints_split_2.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_3.json" \
+     --cuda_visible_devices "4,5" > data/mixed_data/logs/tag_domain_keypoints_split_3.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_4.json" \
+     --cuda_visible_devices "6,7" > data/mixed_data/logs/tag_domain_keypoints_split_4.log 2>&1 &
+
+ # worker 3
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_5.json" \
+     --cuda_visible_devices "2,3" > data/mixed_data/logs/tag_domain_keypoints_split_5.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_6.json" \
+     --cuda_visible_devices "4,5" > data/mixed_data/logs/tag_domain_keypoints_split_6.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_7.json" \
+     --cuda_visible_devices "6,7" > data/mixed_data/logs/tag_domain_keypoints_split_7.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u data_tag_domain_keypoints.py \
+     --input_file_path "data/mixed_data/splits/split_8.json" \
+     --cuda_visible_devices "0,1" > data/mixed_data/logs/tag_domain_keypoints_split_8.log 2>&1 &
+
+ ####################
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u /share/project/sunshuang/deep_search/data_syn/data_tag_domain_keypoints.py \
+     --input_file_path "/share/project/sunshuang/deep_search/data_for_rl/musique_ans_v1.0_train_only_qa.json" \
+     --cuda_visible_devices "0,1,2,3,4,5,6,7" > /share/project/sunshuang/deep_search/data_for_rl/tag_musique.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u /share/project/sunshuang/deep_search/data_syn/data_tag_domain_keypoints.py \
+     --input_file_path "/share/project/sunshuang/deep_search/data_for_rl/MultiHopRAG_only_qa.json" \
+     --cuda_visible_devices "0,1,2,3,4,5,6,7" > /share/project/sunshuang/deep_search/data_for_rl/MultiHopRAG_only_qa.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u /share/project/sunshuang/deep_search/data_syn/data_tag_domain_keypoints.py \
+     --input_file_path "/share/project/sunshuang/deep_search/data_for_rl/2wiki_train_only_compositional_qa.json" \
+     --cuda_visible_devices "0,1,2,3,4,5,6,7" > /share/project/sunshuang/deep_search/data_for_rl/2wiki_train_only_compositional_qa.log 2>&1 &
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u /share/project/sunshuang/deep_search/data_syn/data_tag_domain_keypoints.py \
+     --input_file_path "/share/project/sunshuang/deep_search/data_for_rl/merged_data.json" \
+     --cuda_visible_devices "0,1,2,3,4,5,6,7" > /share/project/sunshuang/deep_search/data_for_rl/merged_data.log 2>&1 &
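
These launch commands assume the corpus has already been sharded into split_N.json files; one way such shards could be produced is sketched below (a hypothetical helper, not part of this commit):

    import json
    import math

    def split_json(input_path: str, n_splits: int = 4) -> None:
        # Shard a JSON array into n_splits roughly equal split_<i>.json files
        with open(input_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        chunk = math.ceil(len(data) / n_splits)
        for i in range(n_splits):
            out_path = f"data/mixed_data/splits/split_{i + 1}.json"
            with open(out_path, "w", encoding="utf-8") as f:
                json.dump(data[i * chunk:(i + 1) * chunk], f, ensure_ascii=False, indent=4)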
deep_search/data_syn/data_tag_domain_keypoints.py ADDED
@@ -0,0 +1,118 @@
+ import os
+ import argparse
+ import json
+ from vllm import LLM, SamplingParams
+ from datasets import Dataset
+ from transformers import AutoTokenizer
+ import random
+ from tqdm import tqdm
+ import torch
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
+
+ user_prompt = f"""You are an advanced semantic analyzer. For the given question, perform the following tasks step-by-step:
+
+ 1. **Domain Identification**:
+    - Determine the broad subject category (domain) this question belongs to.
+    - Examples: film, history, biology, geography, politics, technology, etc. (or any other suitable domain)
+
+ 2. **Key Point Extraction**:
+    - Identify 2-4 core semantic components that are crucial for answering
+    - Include:
+      • Key entities (e.g., films, people, locations)
+      • Critical attributes (e.g., age, duration, population)
+      • Core relationships (e.g., comparison, causality)
+      • Measurement dimensions (e.g., time, quantity)
+    - Exclude filler words and non-essential descriptors
+
+ **Output Requirements**:
+ - Use JSON format: {{"domain": "...", "key_points": [...]}}
+ - Keep key_points concise (1-2 words each)
+ - Use lowercase for all outputs
+ - Separate multiple key_points with commas
+
+ **Examples**:
+ Question: "Which film whose director is younger, Charge It To Me or Danger: Diabolik?"
+ Output: {{"domain": "film", "key_points": ["director", "age"]}}
+
+ **Now process this question:**
+ {{Question}}"""
+
+ def main(input_file_path, output_path):
+
+     data = []
+     with open(input_file_path, "r", encoding="utf-8") as file:
+         # for line in file:
+         #     data.append(json.loads(line))
+         data = json.load(file)
+
+     prompts = []
+
+     for item in tqdm(data, desc="gen prompts"):
+         prompt = user_prompt.replace('{Question}', item["question"])
+         messages = [{"role": "user", "content": prompt}]
+
+         text = tokenizer.apply_chat_template(
+             messages, add_generation_prompt=True, tokenize=False
+         )
+         prompts.append(text)
+         item["tag_prompt"] = text
+     print(prompts[0])
+     outputs = llm.generate(prompts, sampling_params)
+
+     generated_texts = []
+     for i, output in tqdm(enumerate(outputs), desc="process outputs"):
+         generated_text = output.outputs[0].text.strip()
+         data[i]["tag_qwq"] = generated_text
+         generated_texts.append(data[i])
+
+     os.makedirs(os.path.dirname(output_path), exist_ok=True)
+     with open(output_path, "w", encoding="utf-8") as json_file:
+         json.dump(generated_texts, json_file, ensure_ascii=False, indent=4)
+
+     print(f"save to {output_path}")
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Process input file and CUDA devices.")
+     parser.add_argument("--input_file_path", type=str, required=True, help="Path to the input JSON file.")
+     parser.add_argument("--cuda_visible_devices", type=str, required=True, help="CUDA devices to use (e.g., '0,1').")
+     args = parser.parse_args()
+
+     os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_visible_devices
+     print(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
+     input_file_path = args.input_file_path
+
+     model_name = "qwq"
+     model_path = "/share/project/zhipengchen/model/QwQ-32B"
+
+     # Derive the output path: <input_dir>/tagged_domain_keypoints/<input_stem>_tagged.json
+     file_name = os.path.splitext(os.path.basename(input_file_path))[0]
+     base_path = os.path.dirname(input_file_path)
+     output_dir = os.path.join(base_path, "tagged_domain_keypoints")
+     os.makedirs(output_dir, exist_ok=True)
+     output_path = os.path.join(output_dir, f"{file_name}_tagged.json")
+
+     tokenizer = AutoTokenizer.from_pretrained(model_path)
+
+     sampling_params = SamplingParams(n=1, temperature=0.6, max_tokens=10000, top_p=0.95, top_k=40)
+     llm = LLM(model=model_path, tensor_parallel_size=torch.cuda.device_count(), gpu_memory_utilization=0.95, trust_remote_code=True)
+
+     print(f"model path: {model_path}")
+     print(f"input_file_path: {input_file_path}")
+     print(f"output_path: {output_path}")
+
+     main(input_file_path, output_path)
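
An illustrative round-trip for one record, using the question and expected output that the prompt itself embeds as an example:

    question = "Which film whose director is younger, Charge It To Me or Danger: Diabolik?"
    prompt = user_prompt.replace('{Question}', question)
    # A well-behaved model should reply with JSON such as:
    # {"domain": "film", "key_points": ["director", "age"]}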
deep_search/data_syn/extract_domain_keypoints.py ADDED
@@ -0,0 +1,87 @@
+ import json
+ import os
+ import torch
+ from tqdm import tqdm
+ import re
+
+ def save_to_json(data, filename):
+     with open(filename, 'w', encoding='utf-8') as f:
+         json.dump(data, f, ensure_ascii=False, indent=4)
+     print(f"save to {filename}, data len: {len(data)}")
+ def load_json(file_path):
+     with open(file_path, "r", encoding="utf-8") as f:
+         data = json.load(f)
+     print(f"load from {file_path}, data len: {len(data)}")
+     return data
+
+ # def extract_braces(text):
+ #     # match every {...} span and its contents (non-greedy)
+ #     matches = re.findall(r'\{.*?\}', text)
+ #     return matches
+
+ def extract_brace_content(s):
+     matches = re.findall(r'\{[^}]*\}', s)
+     return matches[-1] if matches else ''
+
+ def extract_domain_keypoints(input_file_path, output_file_path):
+     data = load_json(input_file_path)
+
+     error_count = 0
+     error_two_count = 0
+     # match_zero = 0
+     # match_one = 0
+     # match_more = 0
+
+     new_data = []
+     for item in tqdm(data):
+         domain_keypoints = item["tag_qwq"].split("\n</think>\n\n")[-1]
+
+         try:  # cases that parse directly
+             domain_keypoints_formatted = json.loads(domain_keypoints)
+             item["domain_keypoints"] = domain_keypoints_formatted
+         except Exception:
+             error_count += 1
+             # print(f"error: {domain_keypoints}")
+             try:  # second-pass parsing: keep only the first paragraph
+                 domain_keypoints = domain_keypoints.split("\n\n")[0]
+                 # print("-----------")
+                 # print(domain_keypoints)
+                 item["domain_keypoints"] = json.loads(domain_keypoints)
+             except Exception:
+                 error_two_count += 1
+                 print(f"error_two_count: {[domain_keypoints]}")
+                 item["domain_keypoints"] = domain_keypoints
+         # try:
+         #     matches = extract_brace_content(domain_keypoints)
+         #     print(matches)
+         #     if len(matches) == 0:
+         #         match_zero += 1
+         #         item["domain_keypoints"] = domain_keypoints_formatted
+         #     elif len(matches) == 1:
+         #         match_one += 1
+         #         item["domain_keypoints"] = json.loads(matches[-1])
+         #     else:
+         #         match_more += 1
+         #         # print(f"more: {matches}")
+         #         item["domain_keypoints"] = json.loads(matches[-1])
+         # except:
+         #     print(f"match_error: {domain_keypoints_formatted}")
+         #     # print(f"match_error": )
+         #     item["domain_keypoints"] = domain_keypoints_formatted
+         #     # item["domain_keypoints"] = domain_keypoints
+         if isinstance(item["domain_keypoints"], dict):
+             new_data.append(item)
+     print(f"error_count: {error_count}")
+     print(f"error_two_count: {error_two_count}")
+     # print(f"match_zero: {match_zero}")
+     # print(f"match_one: {match_one}")
+     # print(f"match_more: {match_more}")
+     print(f"len data: {len(data)}")
+     print(f"len new_data: {len(new_data)}")
+     save_to_json(new_data, output_file_path)
+
+
+ if __name__ == "__main__":
+     input_file_path = "/share/project/sunshuang/deep_search/data_for_rl/tagged_domain_keypoints/merged_data_tagged.json"
+     output_file_path = "/share/project/sunshuang/deep_search/data_for_rl/tagged_domain_keypoints/merged_data_tagged_domain_keypoints.json"
+     extract_domain_keypoints(input_file_path, output_file_path)
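The two-stage parsing above (try `json.loads` on everything after `</think>`, then retry on the first blank-line-delimited block) can be exercised standalone. A minimal sketch with fabricated model output; the sample strings are assumptions about the tagger's reply shape, not real QwQ replies:

import json

def parse_domain_keypoints(tagged: str):
    payload = tagged.split("\n</think>\n\n")[-1]
    try:  # first pass: the whole tail parses as JSON
        return json.loads(payload)
    except Exception:
        try:  # second pass: keep only the first paragraph
            return json.loads(payload.split("\n\n")[0])
        except Exception:
            return payload  # give up; the caller filters out non-dicts

ok = "thoughts\n</think>\n\n{\"domain\": \"history\", \"keypoints\": [\"WWII\"]}"
messy = "thoughts\n</think>\n\n{\"domain\": \"math\"}\n\nExtra trailing prose."
print(parse_domain_keypoints(ok))     # {'domain': 'history', 'keypoints': ['WWII']}
print(parse_domain_keypoints(messy))  # {'domain': 'math'}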
deep_search/data_syn/graph_selection.py ADDED
@@ -0,0 +1,91 @@
+ import os
+ import json
+
+ def save_to_json(data, filename):
+     with open(filename, 'w', encoding='utf-8') as f:
+         json.dump(data, f, ensure_ascii=False, indent=4)
+     print(f"save to {filename}, data len: {len(data)}")
+ def load_json(file_path):
+     with open(file_path, "r", encoding="utf-8") as f:
+         data = json.load(f)
+     print(f"load from {file_path}, data len: {len(data)}")
+     return data
+
+
+ # Input and output paths
+ input_folder = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits"
+ # output_folder = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/code/graph"
+
+ # # Create the output folder
+ # os.makedirs(output_folder, exist_ok=True)
+
+ # Initialize the classification dict
+ classified_data = {
+     "1-hop": [],
+     "2-hop": [],
+     "3-hop": [],
+     "4-hop": [],
+     "5-hop": [],
+     "other": []
+ }
+
+ new_data = []
+ # Walk every file in the target folder
+ for filename in sorted(os.listdir(input_folder)):
+
+     if filename == "tagged_domain_keypoints":
+         continue
+     if "tagged" in filename and filename.endswith(".json"):
+         file_path = os.path.join(input_folder, filename)
+         print(filename)
+         # Read the JSON file
+         with open(file_path, "r", encoding="utf-8") as f:
+             data = json.load(f)
+
+         # Walk every entry in the data
+         for item in data:
+             # Extract the Reasoning Graph section
+             tag_qwq = item.get("tag_qwq", "")
+             reasoning_graph = tag_qwq.split("</think>")[-1].split("Graph:")[-1].split("Area:")[0].strip()
+
+             # Determine the hop count
+             hop = None
+             if "1-hop" in reasoning_graph:
+                 hop = "1-hop"
+                 item["hop"] = 1
+             elif "2-hop" in reasoning_graph:
+                 hop = "2-hop"
+                 item["hop"] = 2
+             elif "3-hop" in reasoning_graph:
+                 hop = "3-hop"
+                 item["hop"] = 3
+             elif "4-hop" in reasoning_graph:
+                 hop = "4-hop"
+                 item["hop"] = 4
+             elif "5-hop" in reasoning_graph:
+                 hop = "5-hop"
+                 item["hop"] = 5
+             else:
+                 hop = "other"
+                 item["hop"] = -1
+             new_data.append(item)
+             # Add the entry to its category
+             # classified_data[hop].append(item)
+
+ # Save the classified data to separate files
+ # for hop, items in classified_data.items():
+ #     output_file = os.path.join(output_folder, f"{hop}.json")
+ #     with open(output_file, "w", encoding="utf-8") as f:
+ #         json.dump(items, f, ensure_ascii=False, indent=4)
+ #     print(f"Saved {len(items)} items to {output_file}")
+
+ data_1 = load_json("/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/tagged_domain_keypoints/merged_tagged_domain_keypoints_keywords_count.json")
+
+ assert len(new_data) == len(data_1)
+ for item, item_1 in zip(new_data, data_1):
+     assert item["idx"] == item_1["idx"], f"{item['idx']} {item_1['idx']}"
+     assert item["Question"] == item_1["Question"]
+
+     item_1["hop"] = item["hop"]
+
+ save_to_json(data_1, "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/tagged_domain_keypoints/merged_tagged_domain_keypoints_keywords_count_hop.json")
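The hop labelling relies on an `n-hop` marker appearing between `Graph:` and `Area:` after the `</think>` tag, checked in ascending order so the lowest label wins. A self-contained sketch of that extraction on a fabricated `tag_qwq` string (the string is an assumption about the tagger's output shape):

def extract_hop(tag_qwq: str) -> int:
    graph = tag_qwq.split("</think>")[-1].split("Graph:")[-1].split("Area:")[0]
    for n in range(1, 6):  # check 1-hop .. 5-hop; first match wins
        if f"{n}-hop" in graph:
            return n
    return -1  # the "other" bucket

sample = "...\n</think>\nGraph: A -> B -> C (2-hop)\nArea: geography"
print(extract_hop(sample))  # 2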
deep_search/data_syn/json_to_text.py ADDED
@@ -0,0 +1,48 @@
+ import json
+ import os
+ import random
+ from tqdm import tqdm
+
+ # Fix the random seed so results are reproducible
+ random.seed(0)
+
+ def save_to_json(data, filename):
+     """Save data to a JSON file."""
+     with open(filename, 'w', encoding='utf-8') as f:
+         json.dump(data, f, ensure_ascii=False, indent=4)
+     print(f"Saved to {filename}, data length: {len(data)}")
+
+ def load_json(file_path):
+     """Load data from a JSON file."""
+     with open(file_path, "r", encoding="utf-8") as f:
+         data = json.load(f)
+     print(f"Loaded from {file_path}, data length: {len(data)}")
+     return data
+
+ def save_questions_to_txt(data, output_txt_file):
+     """Write the Question field of each item to a TXT file."""
+     with open(output_txt_file, 'w', encoding='utf-8') as f:
+         for item in data:
+             question = item.get("Question", "")  # get the Question field, defaulting to an empty string
+             if question:  # write the item only if it has a Question
+                 f.write(f"idx: {item['idx']}\n")
+                 f.write(f"question: {item['Question']}\n\n")  # one record per question
+     print(f"Questions saved to {output_txt_file}")
+
+ # Input and output file paths
+ input_file = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset.json"  # replace with your input file path
+ output_file = "shuffled_data.json"  # replace with your output file path
+ output_txt_file = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset.txt"  # output TXT file path
+
+ # Load the data
+ data = load_json(input_file)
+
+ # Shuffle the data
+ random.shuffle(data)
+ print("Data shuffled successfully.")
+
+ # # Save the shuffled data
+ # save_to_json(data, output_file)
+
+ # Write each item's Question field to the TXT file
+ save_questions_to_txt(data, output_txt_file)
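Because `random.seed(0)` runs at import time, the `random.shuffle(data)` call above is deterministic across runs of the script. A quick sketch demonstrating that property on toy input:

import random

def shuffled(seq, seed=0):
    random.seed(seed)
    out = list(seq)
    random.shuffle(out)
    return out

print(shuffled(range(5)) == shuffled(range(5)))           # True: same seed, same order
print(shuffled(range(5)) == shuffled(range(5), seed=1))   # False (almost surely)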
deep_search/data_syn/keywords_count.py ADDED
@@ -0,0 +1,60 @@
+ import json
+ import os
+ import torch
+ from tqdm import tqdm
+ import ast
+ import re
+
+ keywords = ["who", "whom", "whose", "where", "what", "when", "how", "why", "which", "whether", "is", "are", "do", "does", "was", "were"]
+
+ def save_to_json(data, filename):
+     with open(filename, 'w', encoding='utf-8') as f:
+         json.dump(data, f, ensure_ascii=False, indent=4)
+     print(f"save to {filename}, data len: {len(data)}")
+ def load_json(file_path):
+     with open(file_path, "r", encoding="utf-8") as f:
+         data = json.load(f)
+     print(f"load from {file_path}, data len: {len(data)}")
+     return data
+
+
+ def count_keywords(text, keywords):
+     """
+     Count how many times each keyword occurs in a string.
+
+     Args:
+         text (str): the input string.
+         keywords (list): the keywords to count.
+
+     Returns:
+         dict: each keyword mapped to its occurrence count.
+     """
+     # Lowercase the text so matching is case-insensitive
+     text = text.lower()
+
+     # Dict holding the count for each keyword
+     keyword_counts = {}
+     total = 0
+     special_total = 0
+     for keyword in keywords:
+         # Match whole words only, using regex word boundaries
+         pattern = r'\b' + re.escape(keyword.lower()) + r'\b'
+         matches = re.findall(pattern, text)
+         keyword_counts[keyword] = len(matches)
+         if keyword not in ["is", "are", "do", "does", "was", "were"]:
+             special_total += len(matches)
+         total += len(matches)
+     keyword_counts["total"] = total
+     keyword_counts["special_total"] = special_total
+     return keyword_counts
+
+
+
+ if __name__ == "__main__":
+     input_file_path = "/share/project/sunshuang/deep_search/data_for_rl/tagged_domain_keypoints/merged_data_tagged_domain_keypoints.json"
+     output_file_path = "/share/project/sunshuang/deep_search/data_for_rl/tagged_domain_keypoints/merged_data_tagged_domain_keypoints_keywords_count.json"
+     data = load_json(input_file_path)
+     for i in tqdm(range(len(data))):
+         text = data[i]["question"]
+         data[i]["keywords_count"] = count_keywords(text, keywords)
+     save_to_json(data, output_file_path)
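The `\b` word boundaries are what keep, say, `who` from matching inside `whoever`. A small sketch of the same counting logic on a sample question (the sample text is illustrative):

import re

def count_word(text: str, word: str) -> int:
    # \b anchors the match at word boundaries, so substrings don't count
    return len(re.findall(r"\b" + re.escape(word) + r"\b", text.lower()))

q = "Who wrote the book, and where was the author born?"
print(count_word(q, "who"))    # 1  (does not match inside "whoever")
print(count_word(q, "where"))  # 1
print(count_word(q, "is"))     # 0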
deep_search/data_syn/merge.py ADDED
@@ -0,0 +1,28 @@
+ import os
+ import json
+
+ # Base directory to merge from
+ base_path = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/tagged_domain_keypoints"
+
+ # Holds the merged data
+ merged_data = []
+
+ # Walk every file in the directory
+ for file in sorted(os.listdir(base_path)):
+     print(f"Processing file: {file}")
+     # Select files ending in "tagged.json" (skip the merged output itself on re-runs)
+     if file.endswith("tagged.json") and file != "merged_tagged.json":
+         file_path = os.path.join(base_path, file)
+         print(f"Processing file: {file_path}")
+
+         # Read the JSON file and merge its contents
+         with open(file_path, "r", encoding="utf-8") as f:
+             data = json.load(f)
+             merged_data.extend(data)
+
+ # Write the merged data to a new file (optional)
+ output_file = os.path.join(base_path, "merged_tagged.json")
+ with open(output_file, "w", encoding="utf-8") as out_f:
+     json.dump(merged_data, out_f, ensure_ascii=False, indent=4)
+
+ print(f"Merge complete: {len(merged_data)} items processed; saved to {output_file}")
deep_search/data_syn/merge_two_data.py ADDED
@@ -0,0 +1,33 @@
+ import json
+ from collections import defaultdict, OrderedDict, Counter
+ from tqdm import tqdm
+ import random
+ import matplotlib.pyplot as plt
+
+
+
+ def save_to_json(data, filename):
+     """Save data to a JSON file."""
+     with open(filename, 'w', encoding='utf-8') as f:
+         json.dump(data, f, ensure_ascii=False, indent=4)
+     print(f"Saved to {filename}, data length: {len(data)}")
+
+ def load_json(file_path):
+     """Load data from a JSON file."""
+     with open(file_path, "r", encoding="utf-8") as f:
+         data = json.load(f)
+     print(f"Loaded from {file_path}, data length: {len(data)}")
+     return data
+
+
+ file1 = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/final_dataset_new/final_selected_dataset.json"
+ file2 = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data/final_selected_dataset.json"
+
+ data1 = load_json(file1)
+ data2 = load_json(file2)
+
+ merged_data = data1 + data2
+
+ output_file = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/merged_4k/4k.json"
+
+ save_to_json(merged_data, output_file)
deep_search/data_syn/search_o1_data_syn.py ADDED
@@ -0,0 +1,938 @@
+ # run_search_o1.py
+ import os
+ import json
+ import time
+ import re
+ from tqdm import tqdm
+ import numpy as np
+ import torch
+ import string
+ from typing import Optional, Tuple, List, Dict
+ import argparse
+
+ from transformers import AutoTokenizer
+ from vllm import LLM, SamplingParams
+
+ from bing_search import (
+     bing_web_search,
+     extract_relevant_info,
+     fetch_page_content,
+     extract_snippet_with_context
+ )
+ from evaluate import (
+     run_evaluation,
+     extract_answer
+ )
+ from prompts import (
+     get_gpqa_search_o1_instruction,
+     get_math_search_o1_instruction,
+     get_code_search_o1_instruction,
+     get_singleqa_search_o1_instruction,
+     get_multiqa_search_o1_instruction,
+     get_webpage_to_reasonchain_instruction,
+     get_task_instruction_openqa,
+     get_task_instruction_math,
+     get_task_instruction_multi_choice,
+     get_task_instruction_code,
+     get_singleqa_search_o1_instruction_1,
+     get_multiqa_search_o1_instruction_1,
+     get_webpage_to_reasonchain_instruction_1,
+     get_math_search_o1_instruction_1,
+ )
+
+
+ from openai import OpenAI
+
+ from add_eval import add_eval
+ # Modify OpenAI's API key and API base to use vLLM's API server.
+ # To talk to vLLM's API server, point the OpenAI client's key and base URL at it.
+
+ # openai_api_key = "EMPTY"
+ # openai_api_base = "http://localhost:8000/v1"
+ # client = OpenAI(
+ #     api_key=openai_api_key,
+ #     base_url=openai_api_base,
+ # )
+
+ # Define special tokens
+ BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
+ END_SEARCH_QUERY = "<|end_search_query|>"
+ BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
+ END_SEARCH_RESULT = "<|end_search_result|>"
+
+
+ # os.environ['http_proxy'] = 'http://127.0.0.1:7880'
+ # os.environ['https_proxy'] = 'http://127.0.0.1:7880'
+
+ # Adds shared caching, a has-answer evaluation, and document truncation.
+
+ def parse_args():
+     parser = argparse.ArgumentParser(description="Run Search O1 for various datasets and models.")
+
+     # Dataset and split configuration
+     parser.add_argument(
+         '--dataset_name',
+         type=str,
+         required=True,
+         choices=['simpleqa', 'gpqa', 'math500', 'aime', 'amc', 'livecode', 'nq', 'triviaqa', 'hotpotqa', '2wiki', 'musique', 'bamboogle'],
+         help="Name of the dataset to use."
+     )
+
+     parser.add_argument(
+         '--split',
+         type=str,
+         required=True,
+         choices=['test', 'diamond', 'main', 'extended'],
+         help="Dataset split to use."
+     )
+
+     parser.add_argument(
+         '--subset_num',
+         type=int,
+         default=-1,
+         help="Number of examples to process. Defaults to all if not specified."
+     )
+
+     # Search and document retrieval configuration
+     parser.add_argument(
+         '--max_search_limit',
+         type=int,
+         default=10,
+         help="Maximum number of searches per question."
+     )
+
+     parser.add_argument(
+         '--max_turn',
+         type=int,
+         default=15,
+         help="Maximum number of turns."
+     )
+
+     parser.add_argument(  # maximum number of documents the search engine returns
+         '--top_k',
+         type=int,
+         default=10,
+         help="Maximum number of search documents to return."
+     )
+
+     parser.add_argument(
+         '--max_doc_len',
+         type=int,
+         default=3000,
+         help="Maximum length of each searched document."
+     )
+
+     # parser.add_argument(
+     #     '--use_jina',
+     #     type=bool,
+     #     default=False,
+     #     help="Whether to use Jina API for document fetching."
+     # )
+     parser.add_argument(
+         '--use_jina',
+         action='store_true',
+         help="Whether to use Jina API for document fetching."
+     )
+
+     parser.add_argument(
+         '--jina_api_key',
+         type=str,
+         default='None',
+         help="Your Jina API Key to Fetch URL Content."
+     )
+
+     # Model configuration
+     parser.add_argument(
+         '--model_path',
+         type=str,
+         required=True,
+         help="Path to the pre-trained model."
+     )
+
+     # Sampling parameters
+     parser.add_argument(
+         '--temperature',
+         type=float,
+         default=0.7,
+         help="Sampling temperature."
+     )
+
+     parser.add_argument(
+         '--top_p',
+         type=float,
+         default=0.8,
+         help="Top-p sampling parameter."
+     )
+
+     parser.add_argument(
+         '--top_k_sampling',
+         type=int,
+         default=20,
+         help="Top-k sampling parameter."
+     )
+
+     parser.add_argument(
+         '--repetition_penalty',
+         type=float,
+         default=None,
+         help="Repetition penalty. If not set, defaults based on the model."
+     )
+
+     parser.add_argument(
+         '--max_tokens',
+         type=int,
+         default=32768,
+         help="Maximum number of tokens to generate. If not set, defaults based on the model and dataset."
+     )
+
+     # Bing API Configuration
+     parser.add_argument(
+         '--bing_subscription_key',
+         type=str,
+         required=True,
+         help="Bing Search API subscription key."
+     )
+
+     parser.add_argument(
+         '--bing_endpoint',
+         type=str,
+         default="https://api.bing.microsoft.com/v7.0/search",
+         help="Bing Search API endpoint."
+     )
+
+     parser.add_argument(
+         '--cache_dir_base',
+         type=str,
+         required=True,
+         help="cache path."
+     )
+
+     parser.add_argument(
+         '--output_dir_base',
+         type=str,
+         required=True,
+         help="output_dir"
+     )
+
+     # parser.add_argument(
+     #     '--model_doc_reason_path',
+     #     type=str,
+     #     required=True,
+     #     help="Path to the document reasoning model."
+     # )
+
+     # openai_api_base
+     # parser.add_argument(
+     #     '--openai_api_base',
+     #     type=str,
+     #     required=True,
+     #     help="openai_api_base"
+     # )
+     # parser.add_argument(
+     #     '--data_path',
+     #     type=str,
+     #     required=True,
+     #     help="Path to the document reasoning model."
+     # )
+     return parser.parse_args()
+
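The commented-out `type=bool` variant of `--use_jina` above is the classic argparse pitfall this file avoids: `bool("False")` is `True`, so any non-empty string would enable the flag. A minimal sketch contrasting the two (the flag name is reused purely for illustration):

import argparse

p = argparse.ArgumentParser()
p.add_argument("--use_jina", action="store_true")  # present -> True, absent -> False
print(p.parse_args([]).use_jina)               # False
print(p.parse_args(["--use_jina"]).use_jina)   # True

bad = argparse.ArgumentParser()
bad.add_argument("--use_jina", type=bool, default=False)
print(bad.parse_args(["--use_jina", "False"]).use_jina)  # True -- bool("False") is truthy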
+ def main():
+     args = parse_args()
+     print(f"args.use_jina: {args.use_jina}")
+     # Extract arguments
+     dataset_name = args.dataset_name
+     split = args.split
+     subset_num = args.subset_num
+     MAX_SEARCH_LIMIT = args.max_search_limit
+     MAX_TURN = args.max_turn
+     top_k = args.top_k
+     max_doc_len = args.max_doc_len
+     model_path = args.model_path
+     # model_doc_reason_path = args.model_doc_reason_path
+     temperature = args.temperature
+     top_p = args.top_p
+     top_k_sampling = args.top_k_sampling
+     repetition_penalty = args.repetition_penalty
+     max_tokens = args.max_tokens
+     bing_subscription_key = args.bing_subscription_key
+     bing_endpoint = args.bing_endpoint
+     use_jina = args.use_jina
+     jina_api_key = args.jina_api_key
+     cache_dir_base = args.cache_dir_base
+     output_dir_base = args.output_dir_base
+     # openai_api_base = args.openai_api_base
+     use_jina = False
+     print(f"use_jina: {use_jina}")
+
+     print(f"CUDA_VISIBLE_DEVICES is set to: {os.environ['CUDA_VISIBLE_DEVICES']}")
+
+     # openai_api_key = "EMPTY"
+     # openai_api_base = openai_api_base
+     # client = OpenAI(
+     #     api_key=openai_api_key,
+     #     base_url=openai_api_base,
+     # )
+
+     # Adjust parameters based on dataset
+     if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki', 'medmcqa', 'pubhealth']:
+         MAX_SEARCH_LIMIT = 5
+     if dataset_name in ['simpleqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
+         MAX_SEARCH_LIMIT = 10
+         MAX_TURN = 15
+         top_k = 10
+         max_doc_len = 3000
+
+     if args.jina_api_key == 'None':
+         jina_api_key = None
+
+     # Set default repetition_penalty if not provided
+     if repetition_penalty is None:
+         repetition_penalty = 1.05 if 'qwq' in model_path.lower() else 1.0
+
+     # Data paths based on dataset
+     if split == "test":  # data path for the test sets
+         data_path = f"./data/test/{dataset_name}.json"
+     else:  # data path for the training sets
+         if dataset_name == 'livecode':
+             data_path = f'./data/LiveCodeBench/{split}.json'
+         elif dataset_name in ['math500', 'gpqa', 'aime', 'amc']:
+             data_path = f'./data/{dataset_name.upper()}/{split}.json'
+         else:
+             data_path = f'./data/QA_Datasets/{dataset_name}.json'
+
+     print('-----------------------')
+     print(f'Using {dataset_name} {split} set.')
+     print('-----------------------')
+
+     # ---------------------- Caching Mechanism ----------------------
+     # Define cache directories and file paths
+     # cache_dir = './cache'
+     model_name = model_path.split('/')[-1].replace('-instruct', '')
+     # cache_dir = f'./{cache_dir_base}_{dataset_name}_{model_name}'
+     cache_dir = cache_dir_base
+     search_cache_path = os.path.join(cache_dir, 'search_cache.json')
+     url_cache_path = os.path.join(cache_dir, 'url_cache.json')
+
+     # Ensure cache directory exists
+     os.makedirs(cache_dir, exist_ok=True)
+
+     # Load existing caches or initialize empty dictionaries
+     if os.path.exists(search_cache_path):
+         with open(search_cache_path, 'r', encoding='utf-8') as f:
+             search_cache = json.load(f)
+     else:
+         search_cache = {}
+
+     if os.path.exists(url_cache_path):
+         with open(url_cache_path, 'r', encoding='utf-8') as f:
+             url_cache = json.load(f)
+     else:
+         url_cache = {}
+
+     # Function to save caches
+     def save_caches():
+         with open(search_cache_path, 'w', encoding='utf-8') as f:
+             json.dump(search_cache, f, ensure_ascii=False, indent=2)
+         with open(url_cache_path, 'w', encoding='utf-8') as f:
+             json.dump(url_cache, f, ensure_ascii=False, indent=2)
+
+     # ---------------------- Model Loading ----------------------
+     print(f"Loading tokenizer from {model_path}...")
+     tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+     if tokenizer.pad_token is None:
+         tokenizer.pad_token = tokenizer.eos_token
+     tokenizer.padding_side = 'left'  # pad on the left
+     print("Tokenizer loaded successfully.")
+
+     # Define output directory based on model and dataset
+
+
+     # if 'qwq' in model_path.lower():
+     #     if dataset_name in ['math500', 'gpqa', 'aime', 'amc', 'livecode']:
+     #         output_dir = f'./{output_dir_base}/{dataset_name}.qwq.search_o1'
+     #         if dataset_name == 'gpqa' and (MAX_SEARCH_LIMIT != 5 or top_k != 10):
+     #             output_dir = f'./{output_dir_base}/runs.analysis/{dataset_name}.qwq.search_o1.{MAX_SEARCH_LIMIT}.{top_k}'
+     #     else:
+     #         output_dir = f'./{output_dir_base}/runs.qa/{dataset_name}.qwq.search_o1'
+     # else:
+     #     model_short_name = model_path.split('/')[-1].lower().replace('-instruct', '')
+     #     output_dir = f'./{output_dir_base}/runs.baselines/{dataset_name}.{model_short_name}.search_o1'
+     output_dir = output_dir_base
+     os.makedirs(output_dir, exist_ok=True)
+
+     print(f"Loading model from {model_path}...")
+     print(f"device_count: {torch.cuda.device_count()}")
+
+     # Initialize the LLM
+     llm = LLM(
+         model=model_path,
+         tensor_parallel_size=torch.cuda.device_count(),
+         gpu_memory_utilization=0.95,
+
+     )
+     print("Model loaded successfully.")
+
+     # # ----------------------Loading model to reason in document ----------------------
+
+     # print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
+     # tokenizer_doc_reason = AutoTokenizer.from_pretrained(model_doc_reason_path, trust_remote_code=True)
+     # if tokenizer_doc_reason.pad_token is None:
+     #     tokenizer_doc_reason.pad_token = tokenizer_doc_reason.eos_token
+     # tokenizer_doc_reason.padding_side = 'left'  # pad on the left
+     # print("tokenizer_doc_reason loaded successfully.")
+
+     # print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
+
+     # # Initialize the LLM
+     # # torch.cuda.set_device(6,7)
+
+     # llm_doc_reason = LLM(
+     #     model=model_doc_reason_path,
+     #     tensor_parallel_size=2,
+     #     gpu_memory_utilization=0.95,
+
+     # )
+     # print("Model_doc_reason loaded successfully.")
+
+     # ---------------------- Data Loading ----------------------
+     print(f"Loading data from {data_path}...")
+     with open(data_path, 'r', encoding='utf-8') as json_file:
+         filtered_data = json.load(json_file)
+     print(f"Data loaded successfully. Total examples: {len(filtered_data)}")
+
+     # ---------------------- Batch Generation Function ----------------------
+     def generate_webpage_to_reasonchain_batch(  # the model reasons over webpage content; answers are then extracted from its replies
+         original_questions: List[str],
+         prev_reasonings: List[str],
+         search_queries: List[str],
+         documents: List[str],
+         dataset_name: str,
+         batch_output_records: List[Dict],  # New parameter to collect outputs
+         max_tokens: int = 32768,
+         coherent: bool = False,
+     ) -> List[str]:
+
+         if "Qwen2.5" in model_path:
+             max_tokens = 8192
+
+         encode_docs = tokenizer(documents, truncation=True, max_length=20000, add_special_tokens=False)["input_ids"]
+         documents = tokenizer.batch_decode(encode_docs)
+
+         # Record the token length of each document
+         doc_lengths = [len(doc) for doc in encode_docs]
+
+         # # Print each document's length
+         # for i, length in enumerate(doc_lengths):
+         #     print(f"Document {i + 1}: {length} tokens")
+
+         # doc_lengths can be returned directly if a length list is needed
+         print(f"for {model_path}, set max_tokens={max_tokens} for doc generation; documents truncated.")
+         print("All document lengths:", doc_lengths)
+
+         # NOTE: dead code from a per-page variant. batch_documents holds one concatenated string per query, so the assertion below could never hold, and user_prompts was used before assignment; the list comprehension further down is the live path.
+         # assert len(documents) / len(search_queries) == top_k, "Number of documents should be equal to top_k * number of search queries."
+
+         # for i, doc in enumerate(documents):
+         #     query_idx = i // top_k  # each query owns top_k documents
+         #     question = original_questions[query_idx]
+         #     reasoning = prev_reasonings[query_idx]
+         #     query = search_queries[query_idx]
+
+         #     # Assemble the user prompt
+         #     up = get_webpage_to_reasonchain_instruction(
+         #         reasoning,  # previous reasoning
+         #         query,      # the search query for this document
+         #         doc         # current document content
+         #     )
+         #     user_prompts.append(up)
+
+
+         user_prompts = [  # build a user prompt from the previous reasoning, the search query, and the retrieved documents
+             get_webpage_to_reasonchain_instruction(r, sq, doc)
+             for r, sq, doc in zip(prev_reasonings, search_queries, documents)
+         ]
+
+         prompts = [{"role": "user", "content": up} for up in user_prompts]
+         prompts = [tokenizer.apply_chat_template([p], tokenize=False, add_generation_prompt=True) for p in prompts]
+
+         output = llm.generate(  # generate the model's replies
+             prompts,
+             sampling_params=SamplingParams(
+                 max_tokens=max_tokens,
+                 temperature=0.7,
+                 top_p=0.8,
+                 top_k=20,
+                 repetition_penalty=1.05,
+             )
+         )
+
+         raw_outputs = [out.outputs[0].text for out in output]
+         extracted_infos = [extract_answer(raw, mode='infogen') for raw in raw_outputs]  # extract the reasoning the model produced from the webpages
+
+         for i, (p, r, e) in enumerate(zip(prompts, raw_outputs, extracted_infos)):
+             batch_output_records.append({
+                 'prompt': p,
+                 'raw_output': r,
+                 'extracted_info': e
+             })
+
+         return extracted_infos
+
+     # ---------------------- Preparation of Input Prompts ----------------------
+     input_list = []
+     for item in filtered_data:  # build the prompts
+         question = item['Question']
+
+         if dataset_name in ['simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:  # fix: include 'simpleqa' so it does not fall through with no instruction
+             if dataset_name in ['nq', 'triviaqa']:
+                 instruction = get_singleqa_search_o1_instruction_1(MAX_SEARCH_LIMIT)
+             elif dataset_name in ['simpleqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
+                 instruction = get_multiqa_search_o1_instruction_1(MAX_SEARCH_LIMIT)
+             if 'qwq' in model_path.lower():
+                 user_prompt = get_task_instruction_openqa(question, model_name='qwq')
+             else:
+                 user_prompt = get_task_instruction_openqa(question)
+
+         elif dataset_name in ['math500', 'aime', 'amc']:
+             instruction = get_math_search_o1_instruction_1(MAX_SEARCH_LIMIT)
+             if 'qwq' in model_path.lower():
+                 user_prompt = get_task_instruction_math(question, model_name='qwq')
+             else:
+                 user_prompt = get_task_instruction_math(question)
+
+         elif dataset_name == 'gpqa':
+             instruction = get_gpqa_search_o1_instruction(MAX_SEARCH_LIMIT)
+             if 'qwq' in model_path.lower():
+                 user_prompt = get_task_instruction_multi_choice(question, model_name='qwq')
+             elif 'llama' in model_path.lower():
+                 user_prompt = get_task_instruction_multi_choice(question, model_name='llama')
+             else:
+                 user_prompt = get_task_instruction_multi_choice(question)
+
+         elif dataset_name == 'livecode':
+             instruction = get_code_search_o1_instruction(MAX_SEARCH_LIMIT)
+             question_title = item.get('question_title', '')
+             if 'qwq' in model_path.lower():
+                 user_prompt = get_task_instruction_code(question, question_title=question_title, model_name='qwq')
+             else:
+                 user_prompt = get_task_instruction_code(question)
+         else:
+             user_prompt = ""  # Default to empty if dataset not matched
+
+         prompt = [{"role": "user", "content": instruction + user_prompt}]  # instruction tells the model how to search; user_prompt carries the actual question
+         prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
+         input_list.append(prompt)
+
+     if subset_num != -1:
+         input_list = input_list[:subset_num]
+         filtered_data = filtered_data[:subset_num]
+
+     # Initialize active sequences
+     active_sequences = [{  # tracks the search history of every question
+         'item': item,
+         'prompt': prompt,
+         'output': '',
+         'finished': False,  # everything starts unfinished
+         'history': [],
+         'search_count': 0,
+         'executed_search_queries': set(),
+         'all_info': [],
+     } for item, prompt in zip(filtered_data, input_list)]
+
+     # ---------------------- Set Max Tokens ----------------------
+     # if 'qwq' in model_path.lower():
+     #     if dataset_name in ['aime', 'amc', 'livecode']:
+     #         max_tokens = 32768
+     #     else:
+     #         max_tokens = 20480
+     # else:
+     #     max_tokens = 8192
+     #     max_tokens = 16384
+     if dataset_name in ['aime', 'amc', 'livecode']:
+         max_tokens = 32768
+     else:
+         max_tokens = 20480
+     # ---------------------- Generation Function ----------------------
+     def run_generation(sequences: List[Dict], max_tokens: int) -> List:
+         prompts = [s['prompt'] for s in sequences]  # pull out the prompts
+         sampling_params = SamplingParams(
+             max_tokens=max_tokens,
+             temperature=temperature,
+             top_p=top_p,
+             top_k=top_k_sampling,
+             repetition_penalty=repetition_penalty,
+             stop=[END_SEARCH_QUERY, tokenizer.eos_token],
+             include_stop_str_in_output=True,
+         )
+         output_list = llm.generate(prompts, sampling_params=sampling_params)  # the model answers from the prompts
+         print(f"run_generation completed {len(output_list)}")
+         return output_list
+
+     # Function to extract the text between start_tag and end_tag
+     def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
+         pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
+         matches = re.findall(pattern, text, flags=re.DOTALL)
+         if matches:
+             return matches[-1].strip()
+         return None
+
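`extract_between` returns the text of the *last* tag pair, which is what makes repeated searches in one output resolve to the most recent query. A runnable sketch of the same logic with the same special tokens:

import re
from typing import Optional

BEGIN = "<|begin_search_query|>"
END = "<|end_search_query|>"

def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
    # DOTALL lets the query span newlines; [-1] picks the most recent pair
    matches = re.findall(re.escape(start_tag) + r"(.*?)" + re.escape(end_tag), text, flags=re.DOTALL)
    return matches[-1].strip() if matches else None

out = f"... {BEGIN} first query {END} more reasoning {BEGIN} second query {END}"
print(extract_between(out, BEGIN, END))  # 'second query'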
+     def replace_recent_steps(origin_str, replace_str):  # update origin_str using replace_str
+         """
+         Replaces specific steps in the original reasoning steps with new steps.
+         If a replacement step contains "DELETE THIS STEP", that step is removed.
+
+         Parameters:
+         - origin_str (str): The original reasoning steps.
+         - replace_str (str): The steps to replace or delete.
+
+         Returns:
+         - str: The updated reasoning steps after applying replacements.
+         The function rewrites the given reasoning steps according to the
+         incoming replace_str; any step whose replacement contains
+         "DELETE THIS STEP" is dropped from the result.
+         """
+
+         def parse_steps(text):
+             """
+             Parses the reasoning steps from a given text.
+
+             Parameters:
+             - text (str): The text containing reasoning steps.
+
+             Returns:
+             - dict: A dictionary mapping step numbers to their content.
+             """
+             step_pattern = re.compile(r"Step\s+(\d+):\s*")  # matches "Step", whitespace, a step number, then a colon
+             steps = {}
+             current_step_num = None
+             current_content = []
+
+             for line in text.splitlines():  # walk the text line by line, checking each line for a step header
+                 step_match = step_pattern.match(line)
+                 if step_match:  # a new step begins
+                     # If there's an ongoing step, save its accumulated content (current_content) before moving current_step_num and current_content to the new step
+                     if current_step_num is not None:
+                         steps[current_step_num] = "\n".join(current_content).strip()
+                     current_step_num = int(step_match.group(1))
+                     content = line[step_match.end():].strip()
+                     current_content = [content] if content else []
+                 else:
+                     if current_step_num is not None:
+                         current_content.append(line)
+
+             # Save the last step if any
+             if current_step_num is not None:
+                 steps[current_step_num] = "\n".join(current_content).strip()
+
+             return steps
+
+         # Parse the original and replacement steps
+         origin_steps = parse_steps(origin_str)
+         replace_steps = parse_steps(replace_str)
+
+         # Apply replacements
+         for step_num, content in replace_steps.items():  # walk the replacement steps
+             if "DELETE THIS STEP" in content:
+                 # Remove the step if it exists
+                 if step_num in origin_steps:
+                     del origin_steps[step_num]
+             else:  # otherwise replace or add the step
+                 # Replace or add the step
+                 origin_steps[step_num] = content
+
+         # Sort the steps by step number
+         sorted_steps = sorted(origin_steps.items())
+
+         # Reconstruct the reasoning steps as a single string
+         new_reasoning_steps = "\n\n".join([f"{content}" for num, content in sorted_steps])  # steps separated by blank lines
+
+         return new_reasoning_steps
+
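The step-replacement protocol is easiest to see on a toy trace: steps are keyed by their `Step N:` headers, and a replacement either overwrites the numbered step or, when it contains `DELETE THIS STEP`, removes it. A compact sketch of the same behaviour, simplified to single-line steps (the sample traces are invented):

import re

def parse_steps(text):
    steps = {}
    for line in text.splitlines():
        m = re.match(r"Step\s+(\d+):\s*(.*)", line)
        if m:
            steps[int(m.group(1))] = m.group(2)
    return steps

def apply_replacements(origin, replace):
    steps = parse_steps(origin)
    for num, content in parse_steps(replace).items():
        if "DELETE THIS STEP" in content:
            steps.pop(num, None)   # drop the step if present
        else:
            steps[num] = content   # overwrite or add
    return "\n\n".join(content for _, content in sorted(steps.items()))

origin = "Step 1: read question\nStep 2: wrong guess\nStep 3: search"
patch = "Step 2: DELETE THIS STEP\nStep 3: search for the author"
print(apply_replacements(origin, patch))
# read question
#
# search for the author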
+     # ---------------------- Initialize Collection Structure ----------------------
+     # Initialize a list to collect batch outputs
+     batch_output_records = []
+
+     start_time = time.time()
+     turn = 0
+
+     # Flow:
+     # 1. Let the model generate a reply from the current prompt.
+     # 2. Extract a search query from the reply
+     #    (there is one only if the reply ends with END_SEARCH_QUERY).
+     # 3. If there is a query: retrieve relevant information from the web,
+     #    process the retrieved information, and let the model reason over the
+     #    previous steps, the query, and the retrieved info to produce a search result.
+     # 4. Return to step 1 (the model now continues from that search result).
+     # 5. If there is no query, this question is finished.
+
+     # Main loop until all sequences are finished or maximum turns reached
+     while True:
+         # Identify sequences that need generation
+         sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']]  # active_sequences holds every live sequence; keep only the unfinished ones
+
+         if sequences_needing_generation:
+             turn += 1
+             print(f'\n-------------- Turn {turn} --------------')
+             print(f"We have {len(sequences_needing_generation)} sequences needing generation...")
+             outputs = run_generation(sequences_needing_generation, max_tokens)  # generate from the prompts
+             print("Generation completed, processing outputs...")
+
+             # Initialize batch variables
+             batch_relevant_info = []
+             batch_original_questions = []
+             batch_prev_reasonings = []
+             batch_search_queries = []
+             batch_documents = []
+             batch_sequences = []
+
+             # Collect URLs to fetch across all sequences
+             all_urls_to_fetch = set()  # set collecting every URL that still needs fetching
+             url_snippets = {}
+             url_sequence_map = {}  # Map URL to list of sequences needing it
+
+             # Process each sequence and collect URLs
+             for seq, out in zip(sequences_needing_generation, outputs):  # append each sequence's new text and collect the URLs it needs
+                 text = out.outputs[0].text  # add the generated text to the sequence's history, prompt, and output
+                 seq['history'].append(text)
+                 # Append generated text to prompt and output
+                 seq['prompt'] += text
+                 seq['output'] += text
+                 seq['all_info'].append({f"turn_{turn}_reason": text})
+                 # Extract search query
+                 search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
+
+                 # If a search query is present and needs to be executed
+                 if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
+                     if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
+                         # Execute search, use cache if available
+                         if search_query in search_cache:
+                             results = search_cache[search_query]  # reuse cached results when available
+                             print(f"Using cached search results for query: \"{search_query}\"")
+                         else:
+                             try:
+                                 print(f"Execute and cache search for query: \"{search_query}\"")
+                                 results = bing_web_search(search_query, bing_subscription_key, bing_endpoint, market='en-US', language='en')  # run the search
+                                 search_cache[search_query] = results  # cache the results
+                                 print(f"Executed and cached search for query: \"{search_query}\"")
+                             except Exception as e:
+                                 print(f"Error during search query '{search_query}': {e}")
+                                 search_cache[search_query] = {}
+                                 results = {}
+
+                         # Extract relevant information from Bing search results
+                         relevant_info = extract_relevant_info(results)[:top_k]  # keep only the most relevant hits
+                         # relevant_info['search_query'] = search_query
+                         seq['relevant_info'] = relevant_info
+
+                         # Extract URLs and snippets
+                         urls_to_fetch = [it['url'] for it in relevant_info]  # collect every URL from the hits
+                         snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info}  # map each URL to its snippet when one exists
+
+                         # Filter URLs that are not cached
+                         urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache]  # URLs not yet cached
+                         cached_urls = [u for u in urls_to_fetch if u in url_cache]  # URLs already cached and processed; no need to request them again
+
+                         # Store info for all_urls_to_fetch and url_snippets
+                         for url in urls_to_fetch_filtered:
+                             all_urls_to_fetch.add(url)
+                             url_snippets[url] = snippets.get(url, "")  # remember the snippet belonging to each URL
+
+                         all_reasoning_steps = seq['output']
+                         all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n")  # collapse blank lines, then split into one step per line
+
+                         truncated_prev_reasoning = ""
+                         for i, step in enumerate(all_reasoning_steps):
+                             truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"  # prefix every step with its number
+
+                         prev_steps = truncated_prev_reasoning.split('\n\n')  # split the numbered string back into steps on blank lines
+                         if len(prev_steps) <= 5:  # with at most 5 steps, keep everything
+                             truncated_prev_reasoning = '\n\n'.join(prev_steps)
+                         else:
+                             truncated_prev_reasoning = ''
+                             for i, step in enumerate(prev_steps):  # with more than 5 steps, truncate: keep the first step, the last four, and any step containing a search query or search result
+                                 if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
+                                     truncated_prev_reasoning += step + '\n\n'
+                                 else:  # elide the unimportant middle steps with "..."
+                                     if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
+                                         truncated_prev_reasoning += '...\n\n'
+                             truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
+
+                         # Collect parameters for batch processing
+                         batch_relevant_info.append(relevant_info)  # retrieved info
+                         batch_original_questions.append(seq['item']['Question'])  # original question
+                         batch_prev_reasonings.append(truncated_prev_reasoning)  # previous reasoning steps
+                         batch_search_queries.append(search_query)  # search query
+                         batch_sequences.append(seq)
+
+                         # Update search count and executed queries
+                         seq['search_count'] += 1  # bump the search count
+                         seq['executed_search_queries'].add(search_query)  # record the executed query
+
+                     elif seq['search_count'] >= MAX_SEARCH_LIMIT:  # once the limit is reached, answer with a message saying no further searches are allowed
+                         limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
+                         seq['prompt'] += limit_message
+                         seq['output'] += limit_message
+                         seq['history'].append(limit_message)
+                         seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
+                         print(f"Search limit reached for query: \"{search_query}\"")
+
+                     elif search_query in seq['executed_search_queries']:  # for a repeated query, answer with a message pointing the model at the earlier results
+                         limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
+                         seq['prompt'] += limit_message
+                         seq['output'] += limit_message
+                         seq['history'].append(limit_message)
+                         seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
+                         print(f"Repeated search for query: \"{search_query}\"")
+
+
+                 else:  # no pending search query: mark the sequence as finished
+                     # If no search query needs to be executed, mark the sequence as finished
+                     seq['finished'] = True
+                     print("Sequence marked as complete.")
+
+             print(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
+             print(f"all_urls_to_fetch: {all_urls_to_fetch}")
+             # Batch fetch all URLs at once to optimize speed
+
+             if all_urls_to_fetch:
+                 print(f"Fetching {len(all_urls_to_fetch)} URLs...")
+                 try:
+                     fetched_contents = fetch_page_content(  # fetch the content of all URLs in a single batch
+                         list(all_urls_to_fetch),
+                         use_jina=use_jina,
+                         jina_api_key=jina_api_key,
+                         # snippets=url_snippets  # Do not pass snippets when updating url_cache directly
+                     )
+                     print(f"Fetched {len(fetched_contents)} URLs successfully.")
+                 except Exception as e:
+                     print(f"Error during batch URL fetching: {e}")
+                     fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
+                 # Update cache with fetched contents
+                 for url, content in fetched_contents.items():  # add the fetched content to url_cache
+                     url_cache[url] = content
+
+             # After fetching, prepare formatted documents for batch processing
+             for relevant_info in batch_relevant_info:
+                 formatted_documents = ""  # gathers all webpage info for this group; appended to batch_documents below
+                 for i, doc_info in enumerate(relevant_info):
+                     url = doc_info['url']
+                     raw_context = url_cache.get(url, "")  # content cached for this url
+                     doc_info['snippet'] = doc_info['snippet'].replace('<b>', '').replace('</b>', '')
+                     success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
+                     if success:
+                         print("Extracted snippet with context successfully.")
+                         context = filtered_context
+                     else:  # otherwise fall back to the first max_doc_len * 2 characters of raw_context, bounding the load on later processing
+                         print("Failed to extract snippet with context, using raw context.")
+                         context = raw_context[:max_doc_len*2]
+
+                     doc_info['context'] = context
+                     formatted_documents += f"**Web Page {i + 1}:**\n"
+                     formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
+
+                 batch_documents.append(formatted_documents)  # add this group's formatted results to batch_documents
+
+             # After fetching, prepare for batch processing if there are any
+             if batch_sequences:
+                 print(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
+                 webpage_analyses = generate_webpage_to_reasonchain_batch(  # generate new reasoning from the prepared inputs
+                     original_questions=batch_original_questions,
+                     prev_reasonings=batch_prev_reasonings,
+                     search_queries=batch_search_queries,
+                     documents=batch_documents,
+                     dataset_name=dataset_name,
+                     batch_output_records=batch_output_records,  # Pass the collection list
+                     max_tokens=max_tokens,
+                 )
+                 print("Batch generation completed, assigning outputs to sequences...")
+
+                 # Stitch together the results summarized for the same query
+
+                 for seq, analysis, doc in zip(batch_sequences, webpage_analyses, batch_documents):  # pair each returned analysis with its sequence
+                     if isinstance(analysis, str):  # a plain string can be appended to the sequence text directly
+                         append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n"  # wrap the result and append it to history, prompt, and output
+                         seq['prompt'] += append_text
+                         seq['output'] += append_text
+                         seq['history'].append(append_text)  # stores each round's webpage analysis
+                         seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
+                     else:  # otherwise analysis is a structure (e.g. a dict) describing reasoning-step replacements
+                         append_text = replace_recent_steps(seq['output'], analysis)
+                         seq['prompt'] += append_text
+                         seq['output'] += append_text
+                         seq['history'].append(append_text)
+                         seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
+
+             # Check if all sequences are finished
+             # Save active_sequences
+             active_sequences_part = [{  # snapshot of each question's search history
+                 'item': ele["item"],
+                 'prompt': ele['prompt'],
+                 'output': ele["output"],
+                 'finished': ele["finished"],
+                 'history': ele["history"],
+                 'search_count': ele["search_count"],
+                 'all_info': ele['all_info']
+             } for ele in active_sequences]
+             with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
+                 json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
+             unfinished = [seq for seq in active_sequences if not seq['finished']]  # a sequence is finished once the model stops issuing new searches
+             if not unfinished:
+                 break
+             else:
+                 if turn >= MAX_TURN:
+                     print(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
+                     break
+
+     total_time = time.time() - start_time
+     print(f"Total time taken: {total_time} seconds")
+
+     # ---------------------- Save Batch Output Records to JSON File ----------------------
+     # Define output JSON file path
+     t = time.localtime()
+     batch_output_file = os.path.join(output_dir, f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')
+
+     # Save batch_output_records to JSON file
+     with open(batch_output_file, 'w', encoding='utf-8') as f:  # stores the inputs and outputs of the webpage reasoning plus the extracted info
+         json.dump(batch_output_records, f, ensure_ascii=False, indent=2)
+
+     print(f"Batch outputs saved to {batch_output_file}")
+
+     # Prepare output list for evaluation
+     output_list = [seq['output'] for seq in active_sequences]
+
+     # Run evaluation
+     run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)
+
+     # Evaluate the has-answer information
+     turn_files = os.listdir(output_dir)
+     turn_files = [file for file in turn_files if file.startswith("turn_")]
+     max_turn_file = max(turn_files, key=lambda x: int(re.search(r'turn_(\d+)', x).group(1)))
+
+     max_turn_file_path = os.path.join(output_dir, max_turn_file)
+     print(f"max_turn_file_path: {max_turn_file_path}")
+     add_eval(model_path, max_turn_file_path)
+
+     # ---------------------- Update Search and URL Cache ----------------------
+     print('Updating Search and URL Cache...')
+     # Load existing caches or initialize empty dictionaries
+     if os.path.exists(search_cache_path):
+         with open(search_cache_path, 'r', encoding='utf-8') as f:
+             search_cache_new = json.load(f)
+     else:
+         search_cache_new = {}
+
+     if os.path.exists(url_cache_path):
+         with open(url_cache_path, 'r', encoding='utf-8') as f:
+             url_cache_new = json.load(f)
+     else:
+         url_cache_new = {}
+
+     search_cache.update(search_cache_new)
+     url_cache.update(url_cache_new)
+
+     save_caches()
+
+     print("Process completed.")
+
+ if __name__ == "__main__":
+     main()
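The truncation rule in the main loop (keep the first step, the last four, and any step carrying a search tag; elide the rest with `...`) is worth a standalone sketch, since it is what bounds prompt growth across turns. The toy steps below are invented:

def truncate_steps(steps, keep_tail=4, marker="<|begin_search_"):
    if len(steps) <= 5:
        return steps
    out, elided = [], False
    for i, step in enumerate(steps):
        if i == 0 or i >= len(steps) - keep_tail or marker in step:
            out.append(step)
            elided = False
        elif not elided:  # collapse a run of dropped steps into a single "..."
            out.append("...")
            elided = True
    return out

steps = [f"Step {i}" for i in range(1, 9)]
steps[2] = "Step 3 <|begin_search_query|>author of X<|end_search_query|>"
print(truncate_steps(steps))
# ['Step 1', '...', 'Step 3 <|...|>', '...', 'Step 5', 'Step 6', 'Step 7', 'Step 8']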
deep_search/data_syn/search_o1_data_syn_sum_single_page.py ADDED
@@ -0,0 +1,953 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # run_search_o1.py
2
+ import os
3
+ import json
4
+ import time
5
+ import re
6
+ from tqdm import tqdm
7
+ import numpy as np
8
+ import torch
9
+ import string
10
+ from typing import Optional, Tuple, List, Dict
11
+ import argparse
12
+
13
+ from transformers import AutoTokenizer
14
+ from vllm import LLM, SamplingParams
15
+
16
+ from bing_search import (
17
+ bing_web_search,
18
+ extract_relevant_info,
19
+ fetch_page_content,
20
+ extract_snippet_with_context
21
+ )
22
+ from evaluate import (
23
+ run_evaluation,
24
+ extract_answer
25
+ )
26
+ from prompts import (
27
+ get_gpqa_search_o1_instruction,
28
+ get_math_search_o1_instruction,
29
+ get_code_search_o1_instruction,
30
+ get_singleqa_search_o1_instruction,
31
+ get_multiqa_search_o1_instruction,
32
+ get_webpage_to_reasonchain_instruction,
33
+ get_task_instruction_openqa,
34
+ get_task_instruction_math,
35
+ get_task_instruction_multi_choice,
36
+ get_task_instruction_code,
37
+ get_singleqa_search_o1_instruction_1,
38
+ get_multiqa_search_o1_instruction_1,
39
+ get_webpage_to_reasonchain_instruction_1,
40
+ get_math_search_o1_instruction_1,
41
+ )
42
+
43
+
44
+ from openai import OpenAI
45
+
46
+ from add_eval import add_eval
47
+ # Modify OpenAI's API key and API base to use vLLM's API server.
48
+ # 使用 vLLM 的 API 服务器需要修改 OpenAI 的 API 密钥和 API 库。
49
+
50
+ # openai_api_key = "EMPTY"
51
+ # openai_api_base = "http://localhost:8000/v1"
52
+ # client = OpenAI(
53
+ # api_key=openai_api_key,
54
+ # base_url=openai_api_base,
55
+ # )
56
+
57
+ # Define special tokens
58
+ BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
59
+ END_SEARCH_QUERY = "<|end_search_query|>"
60
+ BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
61
+ END_SEARCH_RESULT = "<|end_search_result|>"
62
+
63
+
64
+ # os.environ['http_proxy'] = 'http://127.0.0.1:7880'
65
+ # os.environ['https_proxy'] = 'http://127.0.0.1:7880'
66
+
67
+ # 增加了cache共享和has answer评测,truncate doc
68
+
69
+ def parse_args():
70
+ parser = argparse.ArgumentParser(description="Run Search O1 for various datasets and models.")
71
+
72
+ # Dataset and split configuration
73
+ parser.add_argument(
74
+ '--dataset_name',
75
+ type=str,
76
+ required=True,
77
+ choices=['simpleqa', 'gpqa', 'math500', 'aime', 'amc', 'livecode', 'nq', 'triviaqa', 'hotpotqa', '2wiki', 'musique', 'bamboogle'],
78
+ help="Name of the dataset to use."
79
+ )
80
+
81
+ parser.add_argument(
82
+ '--split',
83
+ type=str,
84
+ required=True,
85
+ choices=['test', 'diamond', 'main', 'extended'],
86
+ help="Dataset split to use."
87
+ )
88
+
89
+ parser.add_argument(
90
+ '--subset_num',
91
+ type=int,
92
+ default=-1,
93
+ help="Number of examples to process. Defaults to all if not specified."
94
+ )
95
+
96
+ # Search and document retrieval configuration
97
+ parser.add_argument(
98
+ '--max_search_limit',
99
+ type=int,
100
+ default=10,
101
+ help="Maximum number of searches per question."
102
+ )
103
+
104
+ parser.add_argument(
105
+ '--max_turn',
106
+ type=int,
107
+ default=15,
108
+ help="Maximum number of turns."
109
+ )
110
+
111
+ parser.add_argument( # 使用搜索引擎时,返回的最大文档数
112
+ '--top_k',
113
+ type=int,
114
+ default=10,
115
+ help="Maximum number of search documents to return."
116
+ )
117
+
118
+ parser.add_argument(
119
+ '--max_doc_len',
120
+ type=int,
121
+ default=3000,
122
+ help="Maximum length of each searched document."
123
+ )
124
+
125
+ # parser.add_argument(
126
+ # '--use_jina',
127
+ # type=bool,
128
+ # default=False,
129
+ # help="Whether to use Jina API for document fetching."
130
+ # )
131
+ parser.add_argument(
132
+ '--use_jina',
133
+ action='store_true',
134
+ help="Whether to use Jina API for document fetching."
135
+ )
136
+
137
+ parser.add_argument(
138
+ '--jina_api_key',
139
+ type=str,
140
+ default='None',
141
+ help="Your Jina API Key to Fetch URL Content."
142
+ )
143
+
144
+ # Model configuration
145
+ parser.add_argument(
146
+ '--model_path',
147
+ type=str,
148
+ required=True,
149
+ help="Path to the pre-trained model."
150
+ )
151
+
152
+ # Sampling parameters
153
+ parser.add_argument(
154
+ '--temperature',
155
+ type=float,
156
+ default=0.7,
157
+ help="Sampling temperature."
158
+ )
159
+
160
+ parser.add_argument(
161
+ '--top_p',
162
+ type=float,
163
+ default=0.8,
164
+ help="Top-p sampling parameter."
165
+ )
166
+
167
+ parser.add_argument(
168
+ '--top_k_sampling',
169
+ type=int,
170
+ default=20,
171
+ help="Top-k sampling parameter."
172
+ )
173
+
174
+ parser.add_argument(
175
+ '--repetition_penalty',
176
+ type=float,
177
+ default=None,
178
+ help="Repetition penalty. If not set, defaults based on the model."
179
+ )
180
+
181
+ parser.add_argument(
182
+ '--max_tokens',
183
+ type=int,
184
+ default=32768,
185
+ help="Maximum number of tokens to generate. If not set, defaults based on the model and dataset."
186
+ )
187
+
188
+ # Bing API Configuration
189
+ parser.add_argument(
190
+ '--bing_subscription_key',
191
+ type=str,
192
+ required=True,
193
+ help="Bing Search API subscription key."
194
+ )
195
+
196
+ parser.add_argument(
197
+ '--bing_endpoint',
198
+ type=str,
199
+ default="https://api.bing.microsoft.com/v7.0/search",
200
+ help="Bing Search API endpoint."
201
+ )
202
+
203
+ parser.add_argument(
204
+ '--cache_dir_base',
205
+ type=str,
206
+ required=True,
207
+ help="cache path."
208
+ )
209
+
210
+ parser.add_argument(
211
+ '--output_dir_base',
212
+ type=str,
213
+ required=True,
214
+ help="output_dir"
215
+ )
216
+
217
+ # parser.add_argument(
218
+ # '--model_doc_reason_path',
219
+ # type=str,
220
+ # required=True,
221
+ # help="Path to the document reasoning model."
222
+ # )
223
+
224
+ # openai_api_base
225
+ # parser.add_argument(
226
+ # '--openai_api_base',
227
+ # type=str,
228
+ # required=True,
229
+ # help="openai_api_base"
230
+ # )
231
+ # parser.add_argument(
232
+ # '--data_path',
233
+ # type=str,
234
+ # required=True,
235
+ # help="Path to the document reasoning model."
236
+ # )
237
+ return parser.parse_args()
238
+
239
+ def main():
240
+ args = parse_args()
241
+ print(f"args.use_jina: {args.use_jina}")
242
+ # Extract arguments
243
+ dataset_name = args.dataset_name
244
+ split = args.split
245
+ subset_num = args.subset_num
246
+ MAX_SEARCH_LIMIT = args.max_search_limit
247
+ MAX_TURN = args.max_turn
248
+ top_k = args.top_k
249
+ max_doc_len = args.max_doc_len
250
+ model_path = args.model_path
251
+ # model_doc_reason_path = args.model_doc_reason_path
252
+ temperature = args.temperature
253
+ top_p = args.top_p
254
+ top_k_sampling = args.top_k_sampling
255
+ repetition_penalty = args.repetition_penalty
256
+ max_tokens = args.max_tokens
257
+ bing_subscription_key = args.bing_subscription_key
258
+ bing_endpoint = args.bing_endpoint
259
+ use_jina = args.use_jina
260
+ jina_api_key = args.jina_api_key
261
+ cache_dir_base = args.cache_dir_base
262
+ output_dir_base = args.output_dir_base
263
+ # openai_api_base = args.openai_api_base
264
+ use_jina = False
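+ # NOTE: hard-coded override; the --use_jina flag parsed above is ignored from here on.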
265
+ print(f"use_jina: {use_jina}")
266
+
267
+ print(f"CUDA_VISIBLE_DEVICES is set to: {os.environ['CUDA_VISIBLE_DEVICES']}")
268
+
269
+ # openai_api_key = "EMPTY"
270
+ # openai_api_base = openai_api_base
271
+ # client = OpenAI(
272
+ # api_key=openai_api_key,
273
+ # base_url=openai_api_base,
274
+ # )
275
+
276
+ # Adjust parameters based on dataset
277
+ if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki', 'medmcqa', 'pubhealth']:
278
+ MAX_SEARCH_LIMIT = 5
279
+ if dataset_name in ['simpleqa' ,'hotpotqa', 'musique', 'bamboogle', '2wiki']:
280
+ MAX_SEARCH_LIMIT = 10
281
+ MAX_TURN = 15
282
+ top_k = 10
283
+ max_doc_len = 3000
284
+
285
+ if args.jina_api_key == 'None':
286
+ jina_api_key = None
287
+
288
+ # Set default repetition_penalty if not provided
289
+ if repetition_penalty is None:
290
+ repetition_penalty = 1.05 if 'qwq' in model_path.lower() else 1.0
291
+
292
+ # Data paths based on dataset
293
+ if split == "test": # dataset path for evaluation
294
+ data_path = f"./data/test/{dataset_name}.json"
295
+ else: # dataset path for training
296
+ if dataset_name == 'livecode':
297
+ data_path = f'./data/LiveCodeBench/{split}.json'
298
+ elif dataset_name in ['math500', 'gpqa', 'aime', 'amc']:
299
+ data_path = f'./data/{dataset_name.upper()}/{split}.json'
300
+ else:
301
+ data_path = f'./data/QA_Datasets/{dataset_name}.json'
302
+
303
+ print('-----------------------')
304
+ print(f'Using {dataset_name} {split} set.')
305
+ print('-----------------------')
306
+
307
+ # ---------------------- Caching Mechanism ----------------------
308
+ # Define cache directories and file paths
309
+ # cache_dir = './cache'
310
+ model_name = model_path.split('/')[-1].replace('-instruct', '')
311
+ # cache_dir = f'./{cache_dir_base}_{dataset_name}_{model_name}'
312
+ cache_dir = cache_dir_base
313
+ search_cache_path = os.path.join(cache_dir, 'search_cache.json')
314
+ url_cache_path = os.path.join(cache_dir, 'url_cache.json')
315
+
316
+ # Ensure cache directory exists
317
+ os.makedirs(cache_dir, exist_ok=True)
318
+
319
+ # Load existing caches or initialize empty dictionaries
320
+ if os.path.exists(search_cache_path):
321
+ with open(search_cache_path, 'r', encoding='utf-8') as f:
322
+ search_cache = json.load(f)
323
+ else:
324
+ search_cache = {}
325
+
326
+ if os.path.exists(url_cache_path):
327
+ with open(url_cache_path, 'r', encoding='utf-8') as f:
328
+ url_cache = json.load(f)
329
+ else:
330
+ url_cache = {}
331
+
332
+ # Function to save caches
333
+ def save_caches():
334
+ with open(search_cache_path, 'w', encoding='utf-8') as f:
335
+ json.dump(search_cache, f, ensure_ascii=False, indent=2)
336
+ with open(url_cache_path, 'w', encoding='utf-8') as f:
337
+ json.dump(url_cache, f, ensure_ascii=False, indent=2)
338
+
339
+ # ---------------------- Model Loading ----------------------
340
+ print(f"Loading tokenizer from {model_path}...")
341
+ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
342
+ if tokenizer.pad_token is None:
343
+ tokenizer.pad_token = tokenizer.eos_token
344
+ tokenizer.padding_side = 'left' # use left padding
345
+ print("Tokenizer loaded successfully.")
346
+
347
+ # Define output directory based on model and dataset
348
+
349
+
350
+ # if 'qwq' in model_path.lower():
351
+ # if dataset_name in ['math500', 'gpqa', 'aime', 'amc', 'livecode']:
352
+ # output_dir = f'./{output_dir_base}/{dataset_name}.qwq.search_o1'
353
+ # if dataset_name == 'gpqa' and (MAX_SEARCH_LIMIT != 5 or top_k != 10):
354
+ # output_dir = f'./{output_dir_base}/runs.analysis/{dataset_name}.qwq.search_o1.{MAX_SEARCH_LIMIT}.{top_k}'
355
+ # else:
356
+ # output_dir = f'./{output_dir_base}/runs.qa/{dataset_name}.qwq.search_o1'
357
+ # else:
358
+ # model_short_name = model_path.split('/')[-1].lower().replace('-instruct', '')
359
+ # output_dir = f'./{output_dir_base}/runs.baselines/{dataset_name}.{model_short_name}.search_o1'
360
+ output_dir = output_dir_base
361
+ os.makedirs(output_dir, exist_ok=True)
362
+
363
+ print(f"Loading model from {model_path}...")
364
+ print(f"device_count: {torch.cuda.device_count()}")
365
+
366
+ # Initialize the LLM
367
+ llm = LLM(
368
+ model=model_path,
369
+ tensor_parallel_size=torch.cuda.device_count(),
370
+ gpu_memory_utilization=0.95,
371
+
372
+ )
373
+ print("Model loaded successfully.")
374
+
375
+ # # ----------------------Loading model to reason in document ----------------------
376
+
377
+ # print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
378
+ # tokenizer_doc_reason = AutoTokenizer.from_pretrained(model_doc_reason_path, trust_remote_code=True)
379
+ # if tokenizer_doc_reason.pad_token is None:
380
+ # tokenizer_doc_reason.pad_token = tokenizer_doc_reason.eos_token
381
+ # tokenizer_doc_reason.padding_side = 'left' # use left padding
382
+ # print("tokenizer_doc_reason loaded successfully.")
383
+
384
+ # print(f"Loading tokenizer_doc_reason from {model_doc_reason_path}...")
385
+
386
+ # # Initialize the LLM
387
+ # # torch.cuda.set_device(6,7)
388
+
389
+ # llm_doc_reason = LLM(
390
+ # model=model_doc_reason_path,
391
+ # tensor_parallel_size=2,
392
+ # gpu_memory_utilization=0.95,
393
+
394
+ # )
395
+ # print("Model_doc_reason loaded successfully.")
396
+
397
+ # ---------------------- Data Loading ----------------------
398
+ print(f"Loading data from {data_path}...")
399
+ with open(data_path, 'r', encoding='utf-8') as json_file:
400
+ filtered_data = json.load(json_file)
401
+ print(f"Data loaded successfully. Total examples: {len(filtered_data)}")
402
+
403
+ # ---------------------- Batch Generation Function ----------------------
404
+ def generate_webpage_to_reasonchain_batch( # the model reasons over each web page; its replies are then distilled into extracted info
405
+ original_questions: List[str],
406
+ prev_reasonings: List[str],
407
+ search_queries: List[str],
408
+ documents: List[List[str]],
409
+ dataset_name: str,
410
+ batch_output_records: List[Dict], # New parameter to collect outputs
411
+ max_tokens: int = 32768,
412
+ coherent: bool = False,
413
+ ) -> List[str]:
414
+
415
+ # if "Qwen2.5" in model_path:
416
+ # max_tokens = 8192
417
+
418
+ # encode_docs = tokenizer(documents, truncation=True, max_length=20000, add_special_tokens=False)["input_ids"]
419
+ # documents = tokenizer.batch_decode(encode_docs)
420
+
421
+ # # count the token length of each document
422
+ # doc_lengths = [len(doc) for doc in encode_docs]
423
+
424
+ # # # print each document's length
425
+ # # for i, length in enumerate(doc_lengths):
426
+ # # print(f"Document {i + 1}: {length} tokens")
427
+
428
+ # # to return the list of lengths, use doc_lengths directly
429
+ # print(f"for {model_path}, set max_tokens={max_tokens} for doc gen, truncate documnets. ")
430
+ # print("All document lengths:", doc_lengths)
431
+
432
+ user_prompts = []
433
+ assert len(original_questions) == len(prev_reasonings) == len(search_queries) == len(documents), "Input lists must have the same length"
434
+
435
+ for i, doc_str_list in enumerate(documents):
436
+ assert len(doc_str_list) == top_k, f"Expected {top_k} documents, but got {len(doc_str_list)}"
437
+ for j, doc_str in enumerate(doc_str_list):
438
+ r = prev_reasonings[i]
439
+ sq = search_queries[i]
440
+ user_prompts.append(get_webpage_to_reasonchain_instruction(r, sq, doc_str))
441
+
442
+ # user_prompts = [ # build the user prompt from the previous reasoning, the search query, and the retrieved doc
443
+ # get_webpage_to_reasonchain_instruction(r, sq, doc)
444
+ # for r, sq, doc in zip(prev_reasonings, search_queries, documents)
445
+ # ]
446
+
447
+ prompts = [{"role": "user", "content": up} for up in user_prompts]
448
+ prompts = [tokenizer.apply_chat_template([p], tokenize=False, add_generation_prompt=True) for p in prompts]
449
+
450
+ output = llm.generate( # generate the model responses
451
+ prompts,
452
+ sampling_params=SamplingParams(
453
+ max_tokens=max_tokens,
454
+ temperature=0.7,
455
+ top_p=0.8,
456
+ top_k=20,
457
+ repetition_penalty=1.05,
458
+ )
459
+ )
460
+
461
+ raw_outputs = [out.outputs[0].text for out in output]
462
+ extracted_infos = [extract_answer(raw, mode='infogen') for raw in raw_outputs] # extract the reasoning the model produced from each web page
463
+
464
+ formatted_infos = []
465
+ formatted_outputs = []
466
+ formatted_prompts = []
467
+ formatted_info = ""
468
+ formatted_output = []
469
+ formatted_prompt = []
470
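+ # Regroup the flat per-document outputs into per-question groups: every top_k consecutive entries belong to one question.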
+ for i, (p, e, r) in enumerate(zip(prompts, extracted_infos, raw_outputs)):
471
+ if i % top_k == 0 and i != 0:
472
+ formatted_infos.append(formatted_info)
473
+ formatted_outputs.append(formatted_output)
474
+ formatted_prompts.append(formatted_prompt)
475
+ formatted_info = ""
476
+ formatted_output = []
477
+ formatted_prompt = []
478
+
479
+ formatted_info += f"Summary of Web Page {i % top_k + 1}:\n{e}\n\n"
480
+ formatted_output.append(r)
481
+ formatted_prompt.append(p)
482
+
483
+ # flush the last group
484
+ formatted_infos.append(formatted_info)
485
+ formatted_outputs.append(formatted_output)
486
+ formatted_prompts.append(formatted_prompt)
487
+
488
+ assert len(formatted_infos) == len(formatted_outputs) == len(formatted_prompts) == len(original_questions), "Number of formatted_infos and formatted_outputs must match number of questions"
489
+ for i, (p, r, e) in enumerate(zip(formatted_prompts, formatted_outputs, formatted_infos)):
490
+ batch_output_records.append({
491
+ 'prompt': p,
492
+ 'raw_output': r,
493
+ 'extracted_info': e
494
+ })
495
+
496
+ return extracted_infos
497
+
498
+ # ---------------------- Preparation of Input Prompts ----------------------
499
+ input_list = []
500
+ for item in filtered_data: # build the prompts
501
+ question = item['Question']
502
+
503
+ if dataset_name in ['nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
504
+ if dataset_name in ['nq', 'triviaqa']:
505
+ instruction = get_singleqa_search_o1_instruction_1(MAX_SEARCH_LIMIT)
506
+ elif dataset_name in ['hotpotqa', 'musique', 'bamboogle', '2wiki']:
507
+ instruction = get_multiqa_search_o1_instruction_1(MAX_SEARCH_LIMIT)
508
+ if 'qwq' in model_path.lower():
509
+ user_prompt = get_task_instruction_openqa(question, model_name='qwq')
510
+ else:
511
+ user_prompt = get_task_instruction_openqa(question)
512
+
513
+ elif dataset_name in ['math500', 'aime', 'amc']:
514
+ instruction = get_math_search_o1_instruction_1(MAX_SEARCH_LIMIT)
515
+ if 'qwq' in model_path.lower():
516
+ user_prompt = get_task_instruction_math(question, model_name='qwq')
517
+ else:
518
+ user_prompt = get_task_instruction_math(question)
519
+
520
+ elif dataset_name == 'gpqa':
521
+ instruction = get_gpqa_search_o1_instruction(MAX_SEARCH_LIMIT)
522
+ if 'qwq' in model_path.lower():
523
+ user_prompt = get_task_instruction_multi_choice(question, model_name='qwq')
524
+ elif 'llama' in model_path.lower():
525
+ user_prompt = get_task_instruction_multi_choice(question, model_name='llama')
526
+ else:
527
+ user_prompt = get_task_instruction_multi_choice(question)
528
+
529
+ elif dataset_name == 'livecode':
530
+ instruction = get_code_search_o1_instruction(MAX_SEARCH_LIMIT)
531
+ question_title = item.get('question_title', '')
532
+ if 'qwq' in model_path.lower():
533
+ user_prompt = get_task_instruction_code(question, question_title=question_title, model_name='qwq')
534
+ else:
535
+ user_prompt = get_task_instruction_code(question)
536
+ else:
537
+ user_prompt = "" # Default to empty if dataset not matched
538
+
539
+ prompt = [{"role": "user", "content": instruction + user_prompt}] # instruction是告诉模型怎么进行搜索,user_prompt是用户具体问题
540
+ prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
541
+ input_list.append(prompt)
542
+
543
+ if subset_num != -1:
544
+ input_list = input_list[:subset_num]
545
+ filtered_data = filtered_data[:subset_num]
546
+
547
+ # Initialize active sequences
548
+ active_sequences = [{ # tracks the search history of each question
549
+ 'item': item,
550
+ 'prompt': prompt,
551
+ 'output': '',
552
+ 'finished': False, # every sequence starts unfinished
553
+ 'history': [],
554
+ 'search_count': 0,
555
+ 'executed_search_queries': set(),
556
+ 'all_info': [],
557
+ } for item, prompt in zip(filtered_data, input_list)]
558
+
559
+ # ---------------------- Set Max Tokens ----------------------
560
+ # if 'qwq' in model_path.lower():
561
+ # if dataset_name in ['aime', 'amc', 'livecode']:
562
+ # max_tokens = 32768
563
+ # else:
564
+ # max_tokens = 20480
565
+ # else:
566
+ # max_tokens = 8192
567
+ # max_tokens = 16384
568
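+ # NOTE: the dataset-based values below override any --max_tokens passed on the command line.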
+ if dataset_name in ['aime', 'amc', 'livecode']:
569
+ max_tokens = 32768
570
+ else:
571
+ max_tokens = 20480
572
+ # ---------------------- Generation Function ----------------------
573
+ def run_generation(sequences: List[Dict], max_tokens: int) -> List:
574
+ prompts = [s['prompt'] for s in sequences] # collect the prompts
575
+ sampling_params = SamplingParams(
576
+ max_tokens=max_tokens,
577
+ temperature=temperature,
578
+ top_p=top_p,
579
+ top_k=top_k_sampling,
580
+ repetition_penalty=repetition_penalty,
581
+ stop=[END_SEARCH_QUERY, tokenizer.eos_token],
582
+ include_stop_str_in_output=True,
583
+ )
584
+ output_list = llm.generate(prompts, sampling_params=sampling_params) # the model generates a continuation for each prompt
585
+ print(f"run_generation completed {len(output_list)}")
586
+ return output_list
587
+
588
+ # Function to extract the text located between start_tag and end_tag
589
+ def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
590
+ pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
591
+ matches = re.findall(pattern, text, flags=re.DOTALL)
592
+ if matches:
593
+ return matches[-1].strip()
594
+ return None
595
+
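+ # Illustrative example (derived from the regex above): if text contains two tagged queries,
+ # e.g. "...<|begin_search_query|>q1<|end_search_query|>...<|begin_search_query|>q2<|end_search_query|>",
+ # then extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY) returns "q2" -- the last match wins.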
596
+ def replace_recent_steps(origin_str, replace_str): # update origin_str using replace_str
597
+ """
598
+ Replaces specific steps in the original reasoning steps with new steps.
599
+ If a replacement step contains "DELETE THIS STEP", that step is removed.
600
+
601
+ Parameters:
602
+ - origin_str (str): The original reasoning steps.
603
+ - replace_str (str): The steps to replace or delete.
604
+
605
+ Returns:
606
+ - str: The updated reasoning steps after applying replacements.
607
610
+ """
611
+
612
+ def parse_steps(text):
613
+ """
614
+ Parses the reasoning steps from a given text.
615
+
616
+ Parameters:
617
+ - text (str): The text containing reasoning steps.
618
+
619
+ Returns:
620
+ - dict: A dictionary mapping step numbers to their content.
621
+ """
622
+ step_pattern = re.compile(r"Step\s+(\d+):\s*") # matches "Step", one or more spaces, the step number, and a colon
623
+ steps = {}
624
+ current_step_num = None
625
+ current_content = []
626
+
627
+ for line in text.splitlines(): # go through the text line by line, checking each line for a step header
628
+ step_match = step_pattern.match(line)
629
+ if step_match: # a new step begins
630
+ # If there's an ongoing step, save its content (accumulated in current_content) before updating current_step_num and current_content
631
+ if current_step_num is not None:
632
+ steps[current_step_num] = "\n".join(current_content).strip()
633
+ current_step_num = int(step_match.group(1))
634
+ content = line[step_match.end():].strip()
635
+ current_content = [content] if content else []
636
+ else:
637
+ if current_step_num is not None:
638
+ current_content.append(line)
639
+
640
+ # Save the last step if any
641
+ if current_step_num is not None: # save the last step
642
+ steps[current_step_num] = "\n".join(current_content).strip()
643
+
644
+ return steps
645
+
646
+ # Parse the original and replacement steps
647
+ origin_steps = parse_steps(origin_str) # parse the original reasoning steps
648
+ replace_steps = parse_steps(replace_str) # parse the replacement steps
649
+
650
+ # Apply replacements
651
+ for step_num, content in replace_steps.items(): # walk the replacement steps
652
+ if "DELETE THIS STEP" in content:
653
+ # Remove the step if it exists
654
+ if step_num in origin_steps: # delete the step if it exists in the original steps
655
+ del origin_steps[step_num]
656
+ else: # otherwise replace (or add) the step
657
+ # Replace or add the step
658
+ origin_steps[step_num] = content
659
+
660
+ # Sort the steps by step number
661
+ sorted_steps = sorted(origin_steps.items()) # sort the steps by step number
662
+
663
+ # Reconstruct the reasoning steps as a single string
664
+ new_reasoning_steps = "\n\n".join([f"{content}" for num, content in sorted_steps]) # rebuild the reasoning string from the sorted steps, separated by blank lines
665
+
666
+ return new_reasoning_steps
667
+
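+ # Illustrative example: if origin_str parses to {1: "A", 2: "B"}, then
+ # replace_recent_steps(origin_str, "Step 2: DELETE THIS STEP\nStep 3: C")
+ # removes step 2 and adds step 3, yielding "A\n\nC".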
668
+ # ---------------------- Initialize Collection Structure ----------------------
669
+ # Initialize a list to collect batch outputs
670
+ batch_output_records = []
671
+
672
+ start_time = time.time()
673
+ turn = 0
674
+
675
+ # Pipeline:
676
+ # First, the model generates a reply from the prompt.
677
+ # A search query is extracted from the reply
678
+ # (it counts only if the reply ends with END_SEARCH_QUERY).
679
+ # Relevant information for the query is retrieved from the web.
680
+ # The retrieved information is processed.
681
+ # The model reasons over the previous steps, the query, and the retrieved info, yielding a search result.
682
+ # Control then returns to the first step (the model generates a new reply conditioned on that search result).
683
+ # If no query is produced, the question is finished.
684
+
685
+ # Main loop until all sequences are finished or maximum turns reached
686
+ while True:
687
+ # Identify sequences that need generation
688
+ sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']] # pick the sequences that still need generation from the list of all active sequences
689
+
690
+ if sequences_needing_generation:
691
+ turn += 1
692
+ print(f'\n-------------- Turn {turn} --------------')
693
+ print(f"We have {len(sequences_needing_generation)} sequences needing generation...")
694
+ outputs = run_generation(sequences_needing_generation, max_tokens) # generate continuations from the current prompts
695
+ print("Generation completed, processing outputs...")
696
+
697
+ # Initialize batch variables
698
+ batch_relevant_info = []
699
+ batch_original_questions = []
700
+ batch_prev_reasonings = []
701
+ batch_search_queries = []
702
+ batch_documents = []
703
+ batch_sequences = []
704
+
705
+ # Collect URLs to fetch across all sequences
706
+ all_urls_to_fetch = set() # collects every URL that needs to be fetched
707
+ url_snippets = {}
708
+ url_sequence_map = {} # Map URL to list of sequences needing it
709
+
710
+ # Process each sequence and collect URLs
711
+ for seq, out in zip(sequences_needing_generation, outputs): # walk the fresh outputs, updating each sequence and collecting URLs to fetch
712
+ text = out.outputs[0].text # the generated text is appended to the sequence's history, prompt, and output below
713
+ seq['history'].append(text)
714
+ # Append generated text to prompt and output
715
+ seq['prompt'] += text
716
+ seq['output'] += text
717
+ seq['all_info'].append({f"turn_{turn}_reason": text})
718
+ # Extract search query
719
+ search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY) # extract the search query
720
+
721
+ # If a search query is present and needs to be executed
722
+ if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
723
+ if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
724
+ # Execute search, use cache if available
725
+ if search_query in search_cache:
726
+ results = search_cache[search_query] # reuse cached results when this query was searched before
727
+ print(f"Using cached search results for query: \"{search_query}\"")
728
+ else:
729
+ try:
730
+ print(f"Execute and cache search for query: \"{search_query}\"")
731
+ results = bing_web_search(search_query, bing_subscription_key, bing_endpoint, market='en-US', language='en') # run the search
732
+ search_cache[search_query] = results # cache the results
733
+ print(f"Executed and cached search for query: \"{search_query}\"")
734
+ except Exception as e:
735
+ print(f"Error during search query '{search_query}': {e}")
736
+ search_cache[search_query] = {}
737
+ results = {}
738
+
739
+ # Extract relevant information from Bing search results
740
+ relevant_info = extract_relevant_info(results)[:top_k] # keep the top_k most relevant results
741
+ seq['relevant_info'] = relevant_info
742
+
743
+ # Extract URLs and snippets
744
+ urls_to_fetch = [it['url'] for it in relevant_info] # collect every URL from the search results
745
+ snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info} # map each URL to its snippet, when one exists
746
+
747
+ # Filter URLs that are not cached
748
+ urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache] # keep only the URLs that are not cached yet
749
+ cached_urls = [u for u in urls_to_fetch if u in url_cache] # URLs already in url_cache were fetched before and need no new request
750
+
751
+ # Store info for all_urls_to_fetch and url_snippets
752
+ for url in urls_to_fetch_filtered:
753
+ all_urls_to_fetch.add(url)
754
+ url_snippets[url] = snippets.get(url, "") # remember the snippet for each URL
755
+
756
+ all_reasoning_steps = seq['output']
757
+ all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n") # collapse blank lines, then split on newlines so each reasoning step sits on its own line
758
+
759
+ truncated_prev_reasoning = ""
760
+ for i, step in enumerate(all_reasoning_steps):
761
+ truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n" # 遍历 all_reasoning_steps 中的每个步骤,并将每个步骤编号和步骤内容格式化后,添加到 truncated_prev_reasoning 字符串中。这样生成一个包含步骤编号和内容的字符串
762
+
763
+ prev_steps = truncated_prev_reasoning.split('\n\n') # split the numbered reasoning back into individual steps
764
+ if len(prev_steps) <= 5: # with at most 5 steps, keep everything
765
+ truncated_prev_reasoning = '\n\n'.join(prev_steps)
766
+ else:
767
+ truncated_prev_reasoning = ''
768
+ for i, step in enumerate(prev_steps): # with more than 5 steps, truncate: keep the first step, the last four, and any step containing a search query or search result
769
+ if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
770
+ truncated_prev_reasoning += step + '\n\n'
771
+ else: # elide the unimportant middle steps with '...'
772
+ if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
773
+ truncated_prev_reasoning += '...\n\n'
774
+ truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
775
+
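+ # Worked example: with 8 steps and no search tags in the middle, the truncation keeps
+ # Step 1, elides Steps 2-4 as a single '...', and keeps Steps 5-8 (the last four).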
776
+ # Collect parameters for batch processing
777
+ batch_relevant_info.append(relevant_info) # retrieved search info
778
+ batch_original_questions.append(seq['item']['Question']) # original question
779
+ batch_prev_reasonings.append(truncated_prev_reasoning) # previous reasoning steps
780
+ batch_search_queries.append(search_query) # search query
781
+ batch_sequences.append(seq)
782
+
783
+ # Update search count and executed queries
784
+ seq['search_count'] += 1 # bump the search count
785
+ seq['executed_search_queries'].add(search_query) # record the executed query
786
+
787
+ elif seq['search_count'] >= MAX_SEARCH_LIMIT: # once the limit is reached, reply that no further searches are allowed
788
+ limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
789
+ seq['prompt'] += limit_message
790
+ seq['output'] += limit_message
791
+ seq['history'].append(limit_message)
792
+ seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
793
+ print(f"Search limit reached for query: \"{search_query}\"")
794
+
795
+ elif search_query in seq['executed_search_queries']: # for a repeated query, reply with a pointer to the earlier results
796
+ limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
797
+ seq['prompt'] += limit_message
798
+ seq['output'] += limit_message
799
+ seq['history'].append(limit_message)
800
+ seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
801
+ print(f"Repeated search for query: \"{search_query}\"")
802
+
803
+
804
+ else: # no pending search query
805
+ # If no search query needs to be executed, mark the sequence as finished
806
+ seq['finished'] = True
807
+ print("Sequence marked as complete.")
808
+
809
+ print(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
810
+ print(f"all_urls_to_fetch: {all_urls_to_fetch}")
811
+ # Batch fetch all URLs at once to optimize speed
812
+
813
+ if all_urls_to_fetch:
814
+ print(f"Fetching {len(all_urls_to_fetch)} URLs...")
815
+ try:
816
+ fetched_contents = fetch_page_content( # fetch the content of all URLs in a single batch
817
+ list(all_urls_to_fetch),
818
+ use_jina=use_jina,
819
+ jina_api_key=jina_api_key,
820
+ # snippets=url_snippets # Do not pass snippets when updating url_cache directly
821
+ )
822
+ print(f"Fetched {len(fetched_contents)} URLs successfully.")
823
+ except Exception as e:
824
+ print(f"Error during batch URL fetching: {e}")
825
+ fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
826
+ # Update cache with fetched contents
827
+ for url, content in fetched_contents.items(): # add the fetched content to url_cache
828
+ url_cache[url] = content
829
+
830
+ # After fetching, prepare formatted documents for batch processing
831
+ for relevant_info in batch_relevant_info:
832
+ formatted_documents = "" # accumulator for the formatted web page info (unused now; the per-document strings in doc_str_list below are what get batched)
833
+
834
+ doc_str_list = []
835
+ for i, doc_info in enumerate(relevant_info):
836
+ url = doc_info['url']
837
+ raw_context = url_cache.get(url, "") # look up the cached content for this url
838
+ doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
839
+ success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
840
+ if success:
841
+ context = filtered_context
842
+ else: # otherwise fall back to the first max_doc_len * 2 characters of raw_context to bound downstream cost
843
+ context = raw_context[:max_doc_len*2]
844
+
845
+ doc_info['context'] = context
846
+ # formatted_documents += f"**Web Page {i + 1}:**\n"
847
+ # formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
848
+ single_doc = json.dumps(doc_info, ensure_ascii=False, indent=2) # json.dumps (not json.dump) returns the JSON string
849
+ doc_str_list.append(single_doc)
850
+ batch_documents.append(doc_str_list) # add this group's per-document JSON strings to batch_documents
851
+
852
+ # After fetching, prepare for batch processing if there are any
853
+ if batch_sequences:
854
+ print(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
855
+ webpage_analyses = generate_webpage_to_reasonchain_batch( # generate new reasoning from the inputs prepared above
856
+ original_questions=batch_original_questions,
857
+ prev_reasonings=batch_prev_reasonings,
858
+ search_queries=batch_search_queries,
859
+ documents=batch_documents,
860
+ dataset_name=dataset_name,
861
+ batch_output_records=batch_output_records, # Pass the collection list
862
+ max_tokens=max_tokens,
863
+ )
864
+ print("Batch generation completed, assigning outputs to sequences...")
865
+
866
+ for seq, analysis, doc in zip(batch_sequences, webpage_analyses, batch_documents): # pair each returned analysis with its sequence
867
+ if isinstance(analysis, str): # a plain string can be appended to the sequence text directly
868
+ append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n" # 封装处理结果,添加到序列的历史记录、提示和输出中
869
+ seq['prompt'] += append_text
870
+ seq['output'] += append_text
871
+ seq['history'].append(append_text) # stores each round's webpage analysis
872
+ seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
873
+ else: # otherwise analysis may be a structured object, e.g. a dict describing reasoning steps to replace
874
+ append_text = replace_recent_steps(seq['output'], analysis)
875
+ seq['prompt'] += append_text
876
+ seq['output'] += append_text
877
+ seq['history'].append(append_text)
878
+ seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
879
+
880
+ # Check if all sequences are finished
881
+ # checkpoint active_sequences to disk
882
+ active_sequences_part = [{ # snapshot of each question's search history
883
+ 'item': ele["item"],
884
+ 'prompt': ele['prompt'],
885
+ 'output': ele["output"],
886
+ 'finished': ele["finished"], # 一开始均为未完成
887
+ 'history':ele["history"],
888
+ 'search_count': ele["search_count"],
889
+ 'all_info': ele['all_info']
890
+ } for ele in active_sequences]
891
+ with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
892
+ json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
893
+ unfinished = [seq for seq in active_sequences if not seq['finished']] # a sequence finishes once the model stops issuing new searches
894
+ if not unfinished:
895
+ break
896
+ else:
897
+ if turn >= MAX_TURN:
898
+ print(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
899
+ break
900
+
901
+ total_time = time.time() - start_time
902
+ print(f"Total time taken: {total_time} seconds")
903
+
904
+ # ---------------------- Save Batch Output Records to JSON File ----------------------
905
+ # Define output JSON file path
906
+ t = time.localtime()
907
+ batch_output_file = os.path.join(output_dir, f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')
908
+
909
+ # Save batch_output_records to JSON file
910
+ with open(batch_output_file, 'w', encoding='utf-8') as f: # stores the inputs, outputs, and extracted info of the web page reasoning
911
+ json.dump(batch_output_records, f, ensure_ascii=False, indent=2)
912
+
913
+ print(f"Batch outputs saved to {batch_output_file}")
914
+
915
+ # Prepare output list for evaluation
916
+ output_list = [seq['output'] for seq in active_sequences]
917
+
918
+ # Run evaluation
919
+ run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split)
920
+
921
+ # Evaluate the has-answer info
922
+ turn_files = os.listdir(output_dir)
923
+ turn_files = [file for file in turn_files if file.startswith("turn_")]
924
+ max_turn_file = max(turn_files, key=lambda x: int(re.search(r'turn_(\d+)', x).group(1)))
925
+
926
+ max_turn_file_path = os.path.join(output_dir, max_turn_file)
927
+ print(f"max_turn_file_path: {max_turn_file_path}")
928
+ add_eval(model_path, max_turn_file_path)
929
+
930
+ # ---------------------- Update Search and URL Cache ----------------------
931
+ print('Updating Search and URL Cache...')
932
+ # Load existing caches or initialize empty dictionaries
933
+ if os.path.exists(search_cache_path):
934
+ with open(search_cache_path, 'r', encoding='utf-8') as f:
935
+ search_cache_new = json.load(f)
936
+ else:
937
+ search_cache_new = {}
938
+
939
+ if os.path.exists(url_cache_path):
940
+ with open(url_cache_path, 'r', encoding='utf-8') as f:
941
+ url_cache_new = json.load(f)
942
+ else:
943
+ url_cache_new = {}
944
+
945
+ search_cache.update(search_cache_new)
946
+ url_cache.update(url_cache_new)
947
+
948
+ save_caches()
949
+
950
+ print("Process completed.")
951
+
952
+ if __name__ == "__main__":
953
+ main()
deep_search/data_syn/select_data.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from collections import defaultdict, OrderedDict, Counter
3
+ from tqdm import tqdm
4
+ import random
5
+ import matplotlib.pyplot as plt
6
+
7
+
8
+
9
+ def analyze_and_visualize_sources(final_dataset, output_image_path="source_distribution_selected_data.png", output_json_path="source_counts.json"):
10
+ """
11
+ Count the distribution of source over final_dataset, draw a pie chart, and save the counts as JSON.
12
+
13
+ :param final_dataset: input list, where each element is a dict
14
+ :param output_image_path: where to save the pie chart (default "source_distribution.png")
15
+ :param output_json_path: where to save the JSON file (default "source_counts.json")
16
+ """
17
+ # collect the items that have a source field
18
+ sources = [item["source"] for item in final_dataset if "source" in item]
19
+
20
+ # count the source distribution
21
+ source_counts = Counter(sources)
22
+
23
+ # save the counts as a JSON file
24
+ with open(output_json_path, "w", encoding="utf-8") as f:
25
+ json.dump(source_counts, f, ensure_ascii=False, indent=4)
26
+ print(f"Source 分布已保存到 {output_json_path}")
27
+
28
+ # draw the pie chart
29
+ labels = list(source_counts.keys())
30
+ counts = list(source_counts.values())
31
+
32
+ plt.figure(figsize=(8, 8))
33
+ plt.pie(counts, labels=labels, autopct='%1.1f%%', startangle=140)
34
+ plt.title("Source Distribution")
35
+ plt.axis('equal') # keep the pie circular
36
+
37
+ # save the pie chart as an image file
38
+ plt.savefig(output_image_path)
39
+ plt.close()
40
+ print(f"Source 分布饼图已保存到 {output_image_path}")
41
+
42
+ # load the raw data
43
+ with open('/share/project/sunshuang/deep_search/data_for_rl/tagged_domain_keypoints/merged_data_tagged_domain_keypoints_keywords_count.json') as f:
44
+ data = json.load(f)
45
+
46
+ for idx, item in enumerate(data): # add a Question key
47
+ if "question" in item:
48
+ item["Question"] = item["question"]
49
+
50
+
51
+ # step 1: preprocess the domain labels
52
+ # count the raw domain distribution
53
+ print("step 1: preprocess domain data")
54
+ domain_counter = defaultdict(int)
55
+ for item in data:
56
+ domain = item["domain_keypoints"]["domain"]
57
+ domain_counter[domain] += 1
58
+
59
+ # determine valid domains (>= 500 items)
60
+ # valid_domains = {d for d, cnt in domain_counter.items() if cnt >= 100}
61
+ # other_domains = [d for d, cnt in domain_counter.items() if cnt < 100]
62
+ valid_domains = {d for d, cnt in domain_counter.items() if cnt >= 500}
63
+ other_domains = [d for d, cnt in domain_counter.items() if cnt < 500]
64
+ print(f"valid_domains: {len(valid_domains)}")
65
+ print(f"other_domains: {len(other_domains)}")
66
+
67
+ # rebuild the dataset (merge small domains into "other")
68
+ processed_data = []
69
+ for item in data:
70
+ original_domain = item["domain_keypoints"]["domain"]
71
+ if original_domain in valid_domains:
72
+ new_domain = original_domain
73
+ else:
74
+ new_domain = "other"
75
+
76
+ new_item = {
77
+ **item,
78
+ "domain_keypoints": {
79
+ **item["domain_keypoints"],
80
+ "domain": new_domain
81
+ }
82
+ }
83
+ processed_data.append(new_item)
84
+
85
+
86
+
87
+ # step 2: allocate sample quotas
88
+ # compute the new domain distribution
89
+ print("step 2: calculate domain distribution")
90
+ new_domain_counts = defaultdict(int)
91
+ for item in processed_data:
92
+ domain = item["domain_keypoints"]["domain"]
93
+ new_domain_counts[domain] += 1
94
+
95
+
96
+ # determine the final domain list
97
+ final_domains = list(valid_domains)
98
+ if new_domain_counts.get("other", 0) > 0:
99
+ final_domains.append("other")
100
+
101
+ print(f"new_domain_counts: {len(new_domain_counts)}")
102
+ print(f"final_domains: {len(final_domains)}")
103
+
104
+ # allocate the 3000-sample quota
105
+ total_samples = 3000
106
+ num_domains = len(final_domains)
107
+ base_quota = total_samples // num_domains
108
+ remainder = total_samples % num_domains
109
+ print(f"base_quota: {base_quota}")
110
+ print(f"remainder: {remainder}")
111
+
112
+ domain_quotas = {}
113
+ # cnttt = 0
114
+ for idx, domain in enumerate(final_domains):
115
+ domain_quotas[domain] = base_quota + (1 if idx < remainder else 0)
116
+ # cnttt += domain_quotas[domain]
117
+
118
+ # print(f"cnttt: {cnttt}")
119
+
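+ # e.g. (hypothetical numbers): with total_samples=3000 and 7 final domains,
+ # base_quota=428 and remainder=4, so the first 4 domains get 429 items and the other 3 get 428.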
120
+
121
+ # step 3: count the keypoints distribution
122
+ print("step 3: calculating keypoints distribution")
123
+ domain_keypoint_dist = defaultdict(lambda: defaultdict(int))
124
+
125
+ for item in tqdm(processed_data, desc="Calculating keypoints distribution"):
126
+ domain = item["domain_keypoints"]["domain"]
127
+ keypoints = item["domain_keypoints"]["key_points"]
128
+ for kp in keypoints:
129
+ domain_keypoint_dist[domain][kp] += 1
130
+
131
+ # within each domain, sort keypoints by count in descending order
132
+ for domain, keypoint_counts in domain_keypoint_dist.items():
133
+ sorted_keypoints = sorted(keypoint_counts.items(), key=lambda x: x[1], reverse=True)
134
+ domain_keypoint_dist[domain] = OrderedDict(sorted_keypoints)
135
+
136
+
137
+ # save the distribution file
138
+ with open("domain_keypoints_distribution_without_remove_dup.json", "w") as f:
139
+ json.dump(domain_keypoint_dist, f, indent=4,ensure_ascii=False)
140
+
141
+
142
+ # step 4: stratified sampling with deduplication
143
+ print("step 4: select data")
144
+ final_dataset = []
145
+ question_set = set()
146
+
147
+ real_selected_cnt = 0
148
+ for domain in tqdm(final_domains, desc="Processing domains"):
149
+ # gather all data for the current domain
150
+ domain_data = [item for item in processed_data if item["domain_keypoints"]["domain"] == domain]
151
+ print(f"-------------- process {domain}")
152
+ print(f"data: {len(domain_data)}")
153
+ # compute the keypoint quotas
154
+ keypoints = list(domain_keypoint_dist[domain].keys())
155
+ # random.shuffle(keypoints) # shuffle the order
156
+
157
+ print(f"key points {len(keypoints)}")
158
+ print(f"domain_quotas: {domain_quotas[domain]}")
159
+ # kp_base = domain_quotas[domain] // len(keypoints)
160
+ # print(f"kp_base: {kp_base}")
161
+ # kp_remainder = domain_quotas[domain] % len(keypoints)
162
+ # print(f"kp_remainder: {kp_remainder}")
163
+ # Since domain_quotas is far smaller than the number of keypoints, instead sort all the data by total in descending order and select top-down, repeating the pass until the quota is filled
164
+
165
+ sorted_domain_data = sorted(domain_data, key=lambda x: x["keywords_count"]["total"], reverse=True)
166
+
167
+ selected_cnt = 0
168
+ cycle_cnt = 0
169
+ selected_index = [] # indices already selected
170
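+ # Multi-pass selection: each pass walks the data sorted by keyword total and picks items whose
+ # keypoints are pairwise disjoint within that pass; passes repeat until the domain quota is met.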
+ while selected_cnt < domain_quotas[domain]: # keep going until the domain quota is met
171
+ cycle_cnt += 1
172
+ # print(f"cycle_cnt: {cycle_cnt}")
173
+ selected_keypoints = set() # reset at the start of every pass
174
+ for idx, item in enumerate(sorted_domain_data): # walk all the data
175
+ if selected_cnt >= domain_quotas[domain]:
176
+ break
177
+ if idx not in selected_index: # this item has not been selected yet
178
+ dup_keypoints = False
179
+ for key_point in item["domain_keypoints"]["key_points"]: # 确保数据的key point不包含在已经选择的key point里
180
+ if key_point in selected_keypoints:
181
+ dup_keypoints = True
182
+ break
183
+ if dup_keypoints: # duplicated keypoints
184
+ continue
185
+ if item["Question"] not in question_set:
186
+ final_dataset.append(item)
187
+ question_set.add(item["Question"])
188
+ selected_keypoints.update(item["domain_keypoints"]["key_points"])
189
+ selected_index.append(idx)
190
+ selected_cnt += 1
191
+ real_selected_cnt += selected_cnt
192
+ print(f"cycle_cnt: {cycle_cnt}")
193
+
194
+ print(f"real_selected_cnt: {real_selected_cnt}")
195
+
196
+
197
+ # # stratified sampling by keypoint
198
+ # selected = []
199
+ # for kp_idx, kp in enumerate(keypoints):
200
+ # # allocate the quota
201
+ # kp_quota = kp_base + (1 if kp_idx < kp_remainder else 0)
202
+ # # print(f"kp_quota: {kp_quota}")
203
+ # # get the questions containing this keypoint
204
+ # kp_items = [item for item in domain_data if kp in item["domain_keypoints"]["key_points"]]
205
+
206
+ # # sort by total in descending order and select
207
+ # sorted_items = sorted(kp_items, key=lambda x: x["keywords_count"]["total"], reverse=True)
208
+ # selected.extend(sorted_items[:kp_quota])
209
+
210
+ # # dedup while preserving order; this step caused under-selection
211
+ # dup_cnt = 0
212
+ # for item in selected:
213
+ # q = item["Question"]
214
+ # if q not in question_set:
215
+ # final_dataset.append(item)
216
+ # question_set.add(q)
217
+ # else:
218
+ # dup_cnt += 1
219
+ # print(f"dup_cnt: {dup_cnt}")
220
+
221
+ # precisely control the final count
222
+
223
+
224
+ # final_dataset = sorted(final_dataset, key=lambda x: x["idx"])
225
+ print(f"final_dataset len: {len(final_dataset)}")
226
+ # final_dataset = final_dataset
227
+
228
+ analyze_and_visualize_sources(final_dataset)
229
+
230
+
231
+ # step 5 保存结果
232
+ with open("final_selected_dataset.json", "w") as f:
233
+ json.dump(final_dataset, f, indent=4, ensure_ascii=False)
deep_search/data_syn/select_remain_data.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # run_search_o1.py
2
+ import os
3
+ import json
4
+ import time
5
+ import re
6
+ from tqdm import tqdm
7
+ import numpy as np
8
+ import torch
9
+ import string
10
+ from typing import Optional, Tuple, List, Dict
11
+ import argparse
12
+
13
+
14
+
15
+ def load_json(file_path):
16
+ with open(file_path, "r", encoding="utf-8") as file:
17
+ data = json.load(file)
18
+ print(f"Loaded {len(data)} items from {file_path}")
19
+ return data
20
+
21
+ def save_json(data, file_path):
22
+ with open(file_path, "w", encoding="utf-8") as file:
23
+ json.dump(data, file, ensure_ascii=False, indent=4)
24
+ print(f"Saved {len(data)} items to {file_path}")
25
+
26
+
27
+ file_1 = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/final_dataset_new/final_selected_dataset.json"
28
+ file_2 = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data/final_selected_dataset.json"
29
+ output_file = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data.json"
30
+ data_1 = load_json(file_1)
31
+ data_2 = load_json(file_2)
32
+
33
+ id_1 = [item["idx"] for item in data_1]
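+ # NOTE: a set here (id_1 = {item["idx"] for item in data_1}) would make the membership tests below O(1).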
34
+
35
+ in_id_1 = 0
36
+ not_in_id_1 = 0
37
+
38
+ in_id_1_id = []
39
+
40
+ dup_id = []
41
+
42
+ remain_data = []
43
+ for item in data_2:
44
+ if item["idx"] not in id_1:
45
+ remain_data.append(item)
46
+ not_in_id_1 += 1
47
+ else:
48
+ if item["idx"] in in_id_1_id:
49
+ dup_id.append(item["idx"])
50
+ else:
51
+
52
+ in_id_1_id.append(item["idx"])
53
+ in_id_1 += 1
54
+
55
+
56
+ print(f"in_id_1: {in_id_1}, not_in_id_1: {not_in_id_1}")
57
+ print(f"dup_id: {len(dup_id)}")
58
+ # save_json(remain_data, output_file)
deep_search/data_syn/source_stats.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from collections import defaultdict, OrderedDict, Counter
3
+ from tqdm import tqdm
4
+ import random
5
+ import matplotlib.pyplot as plt
6
+
7
+
8
+
9
+ def save_to_json(data, filename):
10
+ """保存数据到 JSON 文件"""
11
+ with open(filename, 'w', encoding='utf-8') as f:
12
+ json.dump(data, f, ensure_ascii=False, indent=4)
13
+ print(f"Saved to {filename}, data length: {len(data)}")
14
+
15
+ def load_json(file_path):
16
+ """从 JSON 文件加载数据"""
17
+ with open(file_path, "r", encoding="utf-8") as f:
18
+ data = json.load(f)
19
+ print(f"Loaded from {file_path}, data length: {len(data)}")
20
+ return data
21
+ def analyze_and_visualize_sources(final_dataset, output_image_path="source_distribution_selected_data.png", output_json_path="source_counts.json"):
22
+ """
23
+ Count the distribution of source over final_dataset, draw a pie chart, and save the counts as JSON.
24
+
25
+ :param final_dataset: input list, where each element is a dict
26
+ :param output_image_path: where to save the pie chart (default "source_distribution.png")
27
+ :param output_json_path: where to save the JSON file (default "source_counts.json")
28
+ """
29
+ # collect the items that have a source field
30
+ sources = [item["source"] for item in final_dataset if "source" in item]
31
+
32
+ # count the source distribution
33
+ source_counts = Counter(sources)
34
+
35
+ # save the counts as a JSON file
36
+ with open(output_json_path, "w", encoding="utf-8") as f:
37
+ json.dump(source_counts, f, ensure_ascii=False, indent=4)
38
+ print(f"Source 分布已保存到 {output_json_path}")
39
+
40
+ # draw the pie chart
41
+ labels = list(source_counts.keys())
42
+ counts = list(source_counts.values())
43
+
44
+ plt.figure(figsize=(8, 8))
45
+ plt.pie(counts, labels=labels, autopct='%1.1f%%', startangle=140)
46
+ plt.title("Source Distribution")
47
+ plt.axis('equal') # keep the pie circular
48
+
49
+ # save the pie chart as an image file
50
+ plt.savefig(output_image_path)
51
+ plt.close()
52
+ print(f"Source 分布饼图已保存到 {output_image_path}")
53
+
54
+
55
+ input_file = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/mixed_data_all.json"
56
+ data = load_json(input_file)
57
+ analyze_and_visualize_sources(data)
deep_search/data_syn/test.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ # user_prompt = f"""You are an advanced semantic analyzer. For the given question, perform the following tasks step-by-step:
4
+
5
+ # 1. **Domain Identification**:
6
+ # - Determine the broad subject category (domain) this question belongs to.
7
+ # - Examples: film, history, biology, geography, politics, technology, etc (or any other suitable domain)
8
+
9
+ # 2. **Key Point Extraction**:
10
+ # - Identify 2-4 core semantic components that are crucial for answering
11
+ # - Include:
12
+ # • Key entities (e.g., films, people, locations)
13
+ # • Critical attributes (e.g., age, duration, population)
14
+ # • Core relationships (e.g., comparison, causality)
15
+ # • Measurement dimensions (e.g., time, quantity)
16
+ # - Exclude filler words and non-essential descriptors
17
+
18
+ # **Output Requirements**:
19
+ # - Use JSON format: {{"domain": "...", "key_points": [...]}}
20
+ # - Keep key_points concise (1-2 words each)
21
+ # - Use lowercase for all outputs
22
+ # - Separate multiple key_points with commas
23
+
24
+ # **Examples**:
25
+ # Question: "Which film whose director is younger, Charge It To Me or Danger: Diabolik?"
26
+ # Output: {{"domain": "film", "key_points": ["director", "age"]}}
27
+
28
+ # **Now process this question:**
29
+ # {{Question}}"""
30
+
31
+ # print(user_prompt.replace('{Question}', 'Which film whose director is younger, Charge It To Me or Danger: Diabolik?'))
32
+
33
+
34
+
35
+ # import json
36
+
37
+ # txt = "{'domain': 'film', 'key_points': ['directors', 'country', 'same']}"
38
+ # print(json.loads(txt))
39
+
40
+
41
+ # Use ast.literal_eval to parse Python-style dict strings (requires import ast)
42
+ # import ast
43
+ # py_style_txt = "{'domain': 'film', 'key_points': ['directors', 'country', 'same']}"
44
+ # print(ast.literal_eval(py_style_txt)) # prints the parsed dict
45
+ import re
46
+ def extract_last_braced_content(s):
47
+ """
48
+ Extract the content wrapped in {} from the string; if there are several, return the last one.
49
+
50
+ :param s: input string
51
+ :return: the last {}-wrapped span, or None if there is none
52
+ """
53
+ # use a regex to match every {}-wrapped span
54
+ # matches = re.findall(r'\{(.*?)\}', s)
55
+ matches = re.findall(r'\{.*?\}', s)
56
+
57
+ # if there are matches, return the last one; otherwise return None
58
+ return matches[-1] if matches else None
59
+
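+ # NOTE: the non-greedy pattern cannot handle nested braces: for '{"a": {"b": 1}}' it
+ # matches only up to the first '}', returning the truncated '{"a": {"b": 1}'.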
60
+ # import json
61
+
62
+ # input_file_path = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset.json"
63
+ # with open(input_file_path, 'r') as f:
64
+ # data = json.load(f)
65
+
66
+ # print(len(data))
67
+
68
+ text = '{"domain": "development", "key_points": ["human development index", "adopted", "time", "employer"]} \n*Note: The key point "human development index" exceeds the 1-2 word limit. To comply strictly, "hdi" (abbreviation) could be used instead for conciseness, but the original term is more precise. Adjusting for the requirement:* \n{"domain": "development", "key_points": ["hdi", "adopted", "time", "employer"]}'
69
+ print(extract_last_braced_content(text))
deep_search/data_syn/testttt.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ import matplotlib
2
+ print(matplotlib.matplotlib_fname())
deep_search/search_o1/google_search.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import requests
4
+ from requests.exceptions import Timeout
5
+ from bs4 import BeautifulSoup
6
+ from tqdm import tqdm
7
+ import time
8
+ import concurrent
9
+ from concurrent.futures import ThreadPoolExecutor
10
+ # import pdfplumber
11
+ from io import BytesIO
12
+ import re
13
+ import string
14
+ from typing import Optional, Tuple
15
+ from nltk.tokenize import sent_tokenize
16
+
17
+ # os.environ['http_proxy'] = 'http://127.0.0.1:7890'
18
+ # os.environ['https_proxy'] = 'http://127.0.0.1:7890'
19
+
20
+
21
+ # ----------------------- Custom Headers -----------------------
22
+ headers = {
23
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
24
+ 'AppleWebKit/537.36 (KHTML, like Gecko) '
25
+ 'Chrome/58.0.3029.110 Safari/537.36',
26
+ 'Referer': 'https://www.google.com/',
27
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
28
+ 'Accept-Language': 'en-US,en;q=0.5',
29
+ 'Connection': 'keep-alive',
30
+ 'Upgrade-Insecure-Requests': '1'
31
+ }
32
+
33
+ # Initialize session
34
+ session = requests.Session()
35
+ session.headers.update(headers)
36
+
37
+ proxies = {
38
+ "http": "http://127.0.0.1:7880",
39
+ "https": "http://127.0.0.1:7880"
40
+ }
41
+
42
+ # def bing_web_search(query, subscription_key, endpoint, market='en-US', language='en', timeout=20):
43
+ # """
44
+ # Perform a search using the Bing Web Search API with a set timeout.
45
+
46
+ # Args:
47
+ # query (str): Search query.
48
+ # subscription_key (str): Subscription key for the Bing Search API.
49
+ # endpoint (str): Endpoint for the Bing Search API.
50
+ # market (str): Market, e.g., "en-US" or "zh-CN".
51
+ # language (str): Language of the results, e.g., "en".
52
+ # timeout (int or float or tuple): Request timeout in seconds.
53
+ # Can be a float representing the total timeout,
54
+ # or a tuple (connect timeout, read timeout).
55
+
56
+ # Returns:
57
+ # dict: JSON response of the search results. Returns None or raises an exception if the request times out.
58
+ # The goal is to run a Bing Web Search and return the results as JSON.
59
+ # On timeout or other failures, return an empty dict ({}) or raise an exception.
60
+ # """
61
+ # # query = f"{query} -site:en.wikipedia.org" # Add site:wikipedia.org to the query to restrict results to Wikipedia
62
+ # print(query)
63
+ # payload = json.dumps({
64
+ # "q": query, # 设置查询内容
65
+ # "mkt": market, # 设置市场
66
+ # "setLang": language, # 设置语言
67
+ # "textDecorations": True, # 启用文本装饰
68
+ # "textFormat": "HTML" # 设置文本格式
69
+ # })
70
+
71
+ # headers = {
72
+ # 'X-API-KEY': subscription_key,
73
+ # 'Content-Type': 'application/json'
74
+ # }
75
+
76
+ # try:
77
+ # # send the POST request
78
+ # response = requests.request("POST", endpoint, headers=headers, data=payload)
79
+ # response.raise_for_status() # Raise exception if the request failed; 4xx/5xx status codes raise requests.exceptions.HTTPError
80
+ # search_results = response.json() #
81
+ # return search_results
82
+ # except Timeout:
83
+ # print(f"Bing Web Search request timed out ({timeout} seconds) for query: {query}")
84
+ # return {} # Or you can choose to raise an exception
85
+ # except requests.exceptions.RequestException as e:
86
+ # print(f"Error occurred during Bing Web Search request: {e}")
87
+ # return {}
88
+
89
+ # def bing_web_search(query, subscription_key, endpoint, market='en-US', language='en', timeout=2000):
90
+ # """
91
+ # Perform a search using the Bing Web Search API with a set timeout.
92
+
93
+ # Args:
94
+ # query (str): Search query.
95
+ # subscription_key (str): Subscription key for the Bing Search API.
96
+ # endpoint (str): Endpoint for the Bing Search API.
97
+ # market (str): Market, e.g., "en-US" or "zh-CN".
98
+ # language (str): Language of the results, e.g., "en".
99
+ # timeout (int or float or tuple): Request timeout in seconds.
100
+ # Can be a float representing the total timeout,
101
+ # or a tuple (connect timeout, read timeout).
102
+
103
+ # Returns:
104
+ # dict: JSON response of the search results. Returns None or raises an exception if the request times out.
105
+ # The goal is to run a Bing Web Search and return the results as JSON.
106
+ # On timeout or other failures, return an empty dict ({}) or raise an exception.
107
+ # """
108
+ # payload = json.dumps({
109
+ # "q": query, # 设置查询内容
110
+ # "num": 11,
111
+ # "mkt": market, # 设置市场
112
+ # "setLang": language, # 设置语言
113
+ # "textDecorations": True, # 启用文本装饰
114
+ # "textFormat": "HTML", # 设置文本格式
115
+ # })
116
+
117
+ # headers = {
118
+ # 'X-API-KEY': subscription_key,
119
+ # 'Content-Type': 'application/json'
120
+ # }
121
+ # error_cnt = 0
122
+ # while True:
123
+ # if error_cnt == 20:
124
+ # print(f"qery: {query} has tried {error_cnt} times without success, just skip it.")
125
+ # break
126
+ # try:
127
+ # # send the POST request
128
+ # response = requests.request("POST", endpoint, headers=headers, data=payload, proxies=proxies,timeout=timeout)
129
+ # response.raise_for_status() # Raise exception if the request failed; 4xx/5xx status codes raise requests.exceptions.HTTPError
130
+ # search_results = response.json() #
131
+ # return search_results
132
+ # except Timeout:
133
+ # error_cnt += 1
134
+ # print(f"error_cnt: {error_cnt}, Bing Web Search request timed out ({timeout} seconds) for query: {query}")
135
+ # time.sleep(5)
136
+ # # return {} # Or you can choose to raise an exception
137
+ # except requests.exceptions.RequestException as e:
138
+ # error_cnt += 1
139
+ # print(f"error_cnt: {error_cnt}, Error occurred during Bing Web Search request: {e}, payload: {payload}")
140
+ # time.sleep(5)
141
+ # return {}
142
+
143
+
144
+
145
+
146
+ def bing_web_search(query, subscription_key, endpoint, market='en-US', language='en', exclude_urls=[],timeout=2000):
147
+ """
148
+ Perform a search using the Bing Web Search API with a set timeout.
149
+
150
+ Args:
151
+ query (str): Search query.
152
+ subscription_key (str): Subscription key for the Bing Search API.
153
+ endpoint (str): Endpoint for the Bing Search API.
154
+ market (str): Market, e.g., "en-US" or "zh-CN".
155
+ language (str): Language of the results, e.g., "en".
156
+ timeout (int or float or tuple): Request timeout in seconds.
157
+ Can be a float representing the total timeout,
158
+ or a tuple (connect timeout, read timeout).
159
+
160
+ Returns:
161
+ dict: JSON response of the search results. Returns None or raises an exception if the request times out.
162
+ The goal is to run a Bing Web Search and return the results as JSON.
163
+ On timeout or other failures, an empty dict ({}) is returned or an exception raised.
164
+ """
165
+ exclude_urls = ['https://greysanatomy.fandom.com/wiki/Season_14_(Grey%27s_Anatomy)', "https://abc.com/news/9fce69b2-3b93-40f3-9cd3-d75b1d90bb05/category/738075"]
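+ # NOTE: this hard-coded list shadows the exclude_urls parameter, and the -site: filtering below is commented out, so exclusions are currently inactive.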
166
+ # if exclude_urls:
167
+ # for site in exclude_urls:
168
+ # query += f" -site:{site}"
169
+
170
+ payload = json.dumps({
171
+ "q": query, # 设置查询内容
172
+ "num": 11,
173
+ "mkt": market, # 设置市场
174
+ "setLang": language, # 设置语言
175
+ "textDecorations": True, # 启用文本装饰
176
+ "textFormat": "HTML" # 设置文本格式
177
+ })
178
+
179
+ headers = {
180
+ 'X-API-KEY': subscription_key,
181
+ 'Content-Type': 'application/json'
182
+ }
183
+ error_cnt = 0
184
+ while True:
185
+ if error_cnt == 20:
186
+ print(f"qery: {query} has tried {error_cnt} times without success, just skip it.")
187
+ break
188
+ try:
189
+ # send the POST request
190
+ response = requests.request("POST", endpoint, headers=headers, data=payload, timeout=timeout)
191
+ response.raise_for_status() # Raise an exception if the request failed; 4xx/5xx status codes raise requests.exceptions.HTTPError
192
+ search_results = response.json() #
193
+ return search_results
194
+ except Timeout:
195
+ error_cnt += 1
196
+ print(f"error_cnt: {error_cnt}, Bing Web Search request timed out ({timeout} seconds) for query: {query}")
197
+ time.sleep(5)
198
+ # return {} # Or you can choose to raise an exception
199
+ except requests.exceptions.RequestException as e:
200
+ error_cnt += 1
201
+ print(f"error_cnt: {error_cnt}, Error occurred during Bing Web Search request: {e}, payload: {payload}")
202
+ time.sleep(5)
203
+ return {} # reached only after 20 consecutive failures; keeps the return type consistent
204
+
205
+ # url = "https://google.serper.dev/search"
206
+
207
+ # payload = json.dumps({
208
+ # "q": "apple inc"
209
+ # })
210
+ # headers = {
211
+ # 'X-API-KEY': 'cb0d28279a826d7e5cf22d71f683c77ffd4ba27d',
212
+ # 'Content-Type': 'application/json'
213
+ # }
214
+
215
+ # response = requests.request("POST", url, headers=headers, data=payload)
216
+
217
+ # print(response.text)
218
+
219
+ # result = google_web_search("apple inc", 'cb0d28279a826d7e5cf22d71f683c77ffd4ba27d')
220
+ # print(result)
221
+
222
+
223
+ def extract_text_from_url(url: str) -> Optional[str]:
224
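+ # Fetch the page via the module-level requests session (assumed to be defined earlier in this file) and return its visible text.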
+ response = session.get(url, timeout=20)
225
+ response.raise_for_status()
226
+ content_type = response.headers.get('Content-Type', '').lower() # currently unused
227
+ try:
228
+ soup = BeautifulSoup(response.content, 'lxml')
229
+ # print(soup)
230
+ except Exception:
231
+ print("lxml parser not found or failed, falling back to html.parser")
232
+ soup = BeautifulSoup(response.text, 'html.parser')
233
+ text = soup.get_text(separator=' ', strip=True)
234
+ return text
235
+ def save_json(data, file_path):
236
+ with open(file_path, "w", encoding="utf-8") as file:
237
+ json.dump(data, file, ensure_ascii=False, indent=4)
238
+ print(f"Saved {len(data)} items to {file_path}")
239
+
240
+ result = bing_web_search("when does season 14 of grey's anatomy come out", 'cb0d28279a826d7e5cf22d71f683c77ffd4ba27d', 'https://google.serper.dev/search')
241
+
242
+ print(result)
243
+ save_json(result, "/share/project/sunshuang/deep_search/search_o1/search_ex.json")
244
+
245
+ # text = extract_text_from_url("https://greysanatomy.fandom.com/wiki/Season_14_(Grey%27s_Anatomy)")
246
+ # print(text)
deep_search/search_o1/infer_wo_search.sh ADDED
@@ -0,0 +1,59 @@
1
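+ # No-search inference baselines: run each base model (QwQ-32B, R1-Distill-Qwen-32B, Qwen2.5-7B/32B-Instruct) directly on the eval set.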
+
2
+ export CUDA_VISIBLE_DEVICES=4,5
3
+ python scripts/inference_wo_search.py \
4
+ --model_name QwQ-32B \
5
+ --model_path /share/project/zhipengchen/model/QwQ-32B \
6
+ --dataset_name eval \
7
+ --input_file /share/project/sunshuang/deep_search/search_o1/data/eval/eval.json
8
+
9
+
10
+ python scripts/inference_wo_search.py \
11
+ --model_name DeepSeek-R1-Distill-Qwen-32B \
12
+ --model_path /share/project/zhipengchen/model/DeepSeek-R1-Distill-Qwen-32B \
13
+ --dataset_name eval \
14
+ --input_file /share/project/sunshuang/deep_search/search_o1/data/eval/eval.json
15
+
16
+
17
+ python scripts/inference_wo_search.py \
18
+ --model_name Qwen2.5-7B-Instruct \
19
+ --model_path /share/project/zhipengchen/model/Qwen2.5-7B-Instruct \
20
+ --dataset_name eval \
21
+ --input_file /share/project/sunshuang/deep_search/search_o1/data/eval/eval.json
22
+
23
+ python scripts/inference_wo_search.py \
24
+ --model_name Qwen2.5-32B-Instruct \
25
+ --model_path /share/project/zhipengchen/model/Qwen2.5-32B-Instruct \
26
+ --dataset_name eval \
27
+ --input_file /share/project/sunshuang/deep_search/search_o1/data/eval/eval.json
28
+
29
+ # ############################
30
+ # # musique
31
+
32
+
33
+ # export CUDA_VISIBLE_DEVICES=6,7
34
+ # python scripts/inference_wo_search.py \
35
+ # --model_name qwq \
36
+ # --model_path /share/project/zhipengchen/model/QwQ-32B \
37
+ # --dataset_name musique_syn \
38
+ # --input_file /share/project/sunshuang/deep_search/search_o1/data/test/musique_syn.json
39
+
40
+
41
+ # python scripts/inference_wo_search.py \
42
+ # --model_name DeepSeek-R1-Distill-Qwen-32B \
43
+ # --model_path /share/project/zhipengchen/model/DeepSeek-R1-Distill-Qwen-32B \
44
+ # --dataset_name musique_syn \
45
+ # --input_file /share/project/sunshuang/deep_search/search_o1/data/test/musique_syn.json
46
+
47
+
48
+ # python scripts/inference_wo_search.py \
49
+ # --model_name Qwen2.5-7B-Instruct \
50
+ # --model_path /share/project/zhipengchen/model/Qwen2.5-7B-Instruct \
51
+ # --dataset_name musique_syn \
52
+ # --input_file /share/project/sunshuang/deep_search/search_o1/data/test/musique_syn.json
53
+
54
+ # python scripts/inference_wo_search.py \
55
+ # --model_name Qwen2.5-32B-Instruct \
56
+ # --model_path /share/project/zhipengchen/model/Qwen2.5-32B-Instruct \
57
+ # --dataset_name musique_syn \
58
+ # --input_file /share/project/sunshuang/deep_search/search_o1/data/test/musique_syn.json
59
+
deep_search/search_o1/llm_as_judge_source.sh ADDED
@@ -0,0 +1,67 @@
1
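+ # LLM-as-judge scoring of eval outputs, with QwQ-32B as the judge; the active runs are below, earlier nohup variants are kept commented out.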
+
2
+ # export CUDA_VISIBLE_DEVICES=4,5
3
+ # nohup python -u scripts/llm_as_judge_w_source.py \
4
+ # --model_path /share/project/zhipengchen/model/QwQ-32B/ \
5
+ # --input_file /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/musique_syn/turn_13.json \
6
+ # --dataset musique_syn \
7
+ # --language zh > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/musique_syn/llm_inf.log 2>&1 &
8
+
9
+
10
+ # export CUDA_VISIBLE_DEVICES=4,5
11
+ # nohup python -u scripts/llm_as_judge_w_source.py \
12
+ # --model_path /share/project/zhipengchen/model/QwQ-32B/ \
13
+ # --input_file /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/musique_syn/turn_13.json \
14
+ # --dataset musique_syn \
15
+ # --language zh > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/musique_syn/llm_inf.log 2>&1 &
16
+
17
+
18
+ # export CUDA_VISIBLE_DEVICES=4,5
19
+ # nohup python -u scripts/llm_as_judge_w_source.py \
20
+ # --model_path /share/project/zhipengchen/model/QwQ-32B/ \
21
+ # --input_file /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/musique_syn/turn_4.json \
22
+ # --dataset musique_syn \
23
+ # --language zh > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/musique_syn/llm_inf.log 2>&1 &
24
+
25
+
26
+
27
+ export CUDA_VISIBLE_DEVICES=4,5
28
+ python -u scripts/llm_as_judge_w_source.py \
29
+ --model_path /share/project/zhipengchen/model/QwQ-32B/ \
30
+ --input_file /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/musique_syn/turn_13.json \
31
+ --dataset musique_syn \
32
+ --language zh > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/musique_syn/llm_inf.log 2>&1
33
+
34
+
35
+ export CUDA_VISIBLE_DEVICES=4,5
36
+ python -u scripts/llm_as_judge_w_source.py \
37
+ --model_path /share/project/zhipengchen/model/QwQ-32B/ \
38
+ --input_file /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/musique_syn/turn_13.json \
39
+ --dataset musique_syn \
40
+ --language zh > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/musique_syn/llm_inf.log 2>&1
41
+
42
+
43
+ export CUDA_VISIBLE_DEVICES=4,5
44
+ python -u scripts/llm_as_judge_w_source.py \
45
+ --model_path /share/project/zhipengchen/model/QwQ-32B/ \
46
+ --input_file /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/musique_syn/turn_4.json \
47
+ --dataset musique_syn \
48
+ --language zh > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/musique_syn/llm_inf.log 2>&1
49
+
50
+
51
+
52
+ export CUDA_VISIBLE_DEVICES=4,5
53
+ python -u scripts/llm_as_judge_w_source.py \
54
+ --model_path /share/project/zhipengchen/model/QwQ-32B/ \
55
+ --input_file /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_musique_syn/turn_13.json \
56
+ --dataset musique_syn \
57
+ --language zh > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_musique_syn/llm_inf.log 2>&1
58
+
59
+
60
+ # /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/syn_zh/turn_10.json
61
+
62
+ # export CUDA_VISIBLE_DEVICES=2,3
63
+ # nohup python -u scripts/llm_as_judge_w_source.py \
64
+ # --model_path /share/project/zhipengchen/model/QwQ-32B/ \
65
+ # --input_file /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/syn_zh/turn_10.json \
66
+ # --dataset musique_syn \
67
+ # --language zh > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/syn_zh/llm_inf.log 2>&1 &
deep_search/search_o1/process_analyses_data.py ADDED
@@ -0,0 +1,65 @@
1
+ import json
2
+ import os
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer
4
+ import torch
5
+ from tqdm import tqdm
6
+ os.environ["CUDA_VISIBLE_DEVICES"] = '6'
7
+
8
+
9
+ def save_to_json(data, filename):
10
+ with open(filename, 'w', encoding='utf-8') as f:
11
+ json.dump(data, f, ensure_ascii=False, indent=4)
12
+ print(f"save to {filename}, data len: {len(data)}")
13
+ def load_json(file_path):
14
+ with open(file_path, "r", encoding="utf-8") as f:
15
+ data = json.load(f)
16
+ print(f"load from {file_path}, data len: {len(data)}")
17
+ return data
18
+
19
+
20
+ if __name__=="__main__":
21
+
22
+ file_path = "/opt/aps/workdir/sunshuang/search_o1/outputs_gen_data_hotpot_qa/runs.baselines/hotpotqa.deepseek-r1-distill-qwen-32.search_o1/batch_0_back/test.2.23,14:26.info_extract.json"
23
+ file_output_path = "/opt/aps/workdir/sunshuang/search_o1/outputs_gen_data_hotpot_qa/runs.baselines/hotpotqa.deepseek-r1-distill-qwen-32.search_o1/batch_0_back/test.2.23,14:26.info_extract_remove_special.json"
24
+ data = load_json(file_path)
25
+
26
+ model_path = "/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32"
27
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
28
+
29
+ print([data[0]["prompt"]])
30
+ data_prompts = [item["prompt"] for item in data]
31
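+ # Character lengths of the DeepSeek-R1 chat-template wrappers, used below to slice them off each prompt.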
+ prefix_len = len("<|begin▁of▁sentence|><|User|>")
32
+ suffix_len = len("\n<|Assistant|><think>\n")
33
+
34
+ no_help = 0
35
+ data_selected = []
36
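+ # Keep only items whose info extraction succeeded, stripping the special-token wrapper from their prompts.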
+ for item in data:
37
+ if item["extracted_info"] == "No helpful information found.":
38
+ no_help += 1
39
+ continue
40
+ prompt = item["prompt"]
41
+ prompt_remove_special_tokens = prompt[prefix_len:-suffix_len]
42
+ item["prompt"] = prompt_remove_special_tokens
43
+ data_selected.append(item)
44
+
45
+ print(f"no_help: {no_help}, percent: {no_help / len(data)}")
46
+ print(f"selected data: {len(data_selected)}, percent: {len(data_selected) / len(data)}")
47
+ save_to_json(data_selected, file_output_path)
48
+ # print(len("\n<|Assistant|><think>\n"))
49
+ # tokenized_prompts = tokenizer(data_prompts, return_tensors="pt", padding="longest", add_special_tokens=False)["input_ids"]
50
+
51
+ # print(f"tokenized_prompts size: {tokenized_prompts.size()}")
52
+
53
+ # decoded_prompts = tokenizer.batch_decode(tokenized_prompts, skip_special_tokens=False)
54
+
55
+ # processed_data = []
56
+
57
+ # print(type(decoded_prompts))
58
+ # print(decoded_prompts[0])
59
+ # for idx, (item, prompt) in tqdm(enumerate(zip(data, decoded_prompts))):
60
+ # if idx == 3:
61
+ # break
62
+ # item["prompt"] = prompt
63
+ # processed_data.append(item)
64
+
65
+ # save_to_json(processed_data, file_output_path)
deep_search/search_o1/reason_two_model_1.sh ADDED
@@ -0,0 +1,60 @@
1
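+ # Two-model reasoning eval on hotpotqa: DeepSeek-R1-Distill-Qwen-32 does the reasoning while a Qwen2.5 Instruct model digests the retrieved documents.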
+ export http_proxy=http://127.0.0.1:7880
2
+ export https_proxy=http://127.0.0.1:7880
3
+
4
+ # export https_proxy=http://127.0.0.1:7890
5
+ # export http_proxy=http://127.0.0.1:7890
6
+ # export all_proxy=socks5://127.0.0.1:7891
7
+ export CUDA_VISIBLE_DEVICES=4,5
8
+
9
+ model_name=JOB9986:LR1e-5:BASEQwen2.5-32B-Instruct:TOKENDeepSeek-R1-Distill-Qwen-32:BSZ1:ACC8/checkpoint-38
10
+
11
+ python scripts/reason_two_model_2.py \
12
+ --dataset_name hotpotqa \
13
+ --cache_dir_base cache_reason_two_model/eval_reason_two_model_r1_32b_doc_by_qwen7b_inst \
14
+ --output_dir_base outputs_reason_two_model/eval_reason_two_model_r1_32b_doc_by_qwen7b_inst \
15
+ --split test \
16
+ --max_search_limit 5 \
17
+ --max_turn 10 \
18
+ --top_k 5 \
19
+ --max_doc_len 3000 \
20
+ --subset_num 500 \
21
+ --model_path "/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32" \
22
+ --model_doc_reason_path "/opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct" \
23
+ --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
24
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
25
+ --bing_endpoint "https://google.serper.dev/search" \
26
+ --openai_api_base "http://localhost:8000/v1"
27
+
28
+
29
+ python scripts/reason_two_model_2.py \
30
+ --dataset_name hotpotqa \
31
+ --cache_dir_base cache_reason_two_model/eval_reason_two_model_r1_32b_doc_by_qwen32b_inst \
32
+ --output_dir_base outputs_reason_two_model/eval_reason_two_model_r1_32b_doc_by_qwen32b_inst \
33
+ --split test \
34
+ --max_search_limit 5 \
35
+ --max_turn 10 \
36
+ --top_k 5 \
37
+ --max_doc_len 3000 \
38
+ --subset_num 500 \
39
+ --model_path "/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32" \
40
+ --model_doc_reason_path "/capacity/userdata/models/Qwen2.5-32B-Instruct" \
41
+ --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
42
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
43
+ --bing_endpoint "https://google.serper.dev/search" \
44
+ --openai_api_base "http://localhost:8001/v1"
45
+
46
+ # python scripts/reason_two_model_2.py \
47
+ # --dataset_name hotpotqa \
48
+ # --cache_dir_base cache_reason_two_model/eval_qwen7b_inst_qwq \
49
+ # --output_dir_base outputs_reason_two_model/eval_qwen7b_inst_qwq \
50
+ # --split test \
51
+ # --max_search_limit 5 \
52
+ # --max_turn 10 \
53
+ # --top_k 5 \
54
+ # --max_doc_len 3000 \
55
+ # --subset_num 100 \
56
+ # --model_path "/opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct" \
57
+ # --model_doc_reason_path "/capacity/userdata/models/QwQ-32B-Preview" \
58
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
59
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
60
+ # --bing_endpoint "https://google.serper.dev/search"
deep_search/search_o1/requirements.txt ADDED
@@ -0,0 +1,7 @@
1
+ torch==2.5.1
2
+ transformers==4.46.1
3
+ sentencepiece==0.2.0
4
+ vllm==0.6.4
5
+ tqdm==4.67.0
6
+ nltk==3.9.1
7
+ pyext==0.7
deep_search/search_o1/run_eval_1.sh ADDED
@@ -0,0 +1,91 @@
1
+ export http_proxy=http://127.0.0.1:7880
2
+ export https_proxy=http://127.0.0.1:7880
3
+ # export https_proxy=http://127.0.0.1:7890
4
+ # export http_proxy=http://127.0.0.1:7890
5
+ # export all_proxy=socks5://127.0.0.1:7891
6
+ export CUDA_VISIBLE_DEVICES=2,3
7
+
8
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/reason_two_model_1.py \
9
+ # --dataset_name hotpotqa \
10
+ # --cache_dir_base cache/eval \
11
+ # --output_dir_base outputs/eval_qwen_2_5_7b \
12
+ # --split test \
13
+ # --max_search_limit 5 \
14
+ # --max_turn 10 \
15
+ # --top_k 5 \
16
+ # --max_doc_len 3000 \
17
+ # --subset_num 100 \
18
+ # --model_path "/opt/aps/workdir/output/checkpoint/qwen_2_5_7b_test" \
19
+ # --model_doc_reason_path "/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32" \
20
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
21
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
22
+ # --bing_endpoint "https://google.serper.dev/search"
23
+
24
+ # Evaluate the Qwen2.5-7B-Instruct model before and after training on the 1217-sample dataset
25
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
26
+ # --dataset_name hotpotqa \
27
+ # --cache_dir_base cache/eval \
28
+ # --output_dir_base outputs/eval_qwen_7b_inst_data_1217 \
29
+ # --split test \
30
+ # --max_search_limit 5 \
31
+ # --max_turn 10 \
32
+ # --top_k 5 \
33
+ # --max_doc_len 3000 \
34
+ # --subset_num 100 \
35
+ # --model_path "/opt/aps/workdir/output/checkpoint/qwen_7b_inst_data_1217" \
36
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
37
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
38
+ # --bing_endpoint "https://google.serper.dev/search"
39
+
40
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
41
+ # --dataset_name hotpotqa \
42
+ # --cache_dir_base cache/eval \
43
+ # --output_dir_base outputs/eval_original_qwen_7b_inst \
44
+ # --split test \
45
+ # --max_search_limit 5 \
46
+ # --max_turn 10 \
47
+ # --top_k 5 \
48
+ # --max_doc_len 3000 \
49
+ # --subset_num 100 \
50
+ # --model_path "/opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct" \
51
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
52
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
53
+ # --bing_endpoint "https://google.serper.dev/search"
54
+
55
+ # /opt/aps/workdir/output/checkpoint/qwen_7b_original_tokenizer_inst_data_1217
56
+
57
+ # 将tokenizer换回自己的
58
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
59
+ # --dataset_name hotpotqa \
60
+ # --cache_dir_base cache/eval \
61
+ # --output_dir_base outputs/eval_qwen_7b_original_tokenizer_inst_data_1217 \
62
+ # --split test \
63
+ # --max_search_limit 5 \
64
+ # --max_turn 10 \
65
+ # --top_k 5 \
66
+ # --max_doc_len 3000 \
67
+ # --subset_num 100 \
68
+ # --model_path "/opt/aps/workdir/output/checkpoint/qwen_7b_original_tokenizer_inst_data_1217" \
69
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
70
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
71
+ # --bing_endpoint "https://google.serper.dev/search"
72
+
73
+
74
+ # /opt/aps/workdir/output/sft_use_original_tokenizer/qwen_7b_original_tokenizer_inst_data_1217
75
+
76
+ /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/reason_two_model_4.py \
77
+ --dataset_name hotpotqa \
78
+ --cache_dir_base cache_real_change_tokenizer/eval_test_wheel \
79
+ --output_dir_base outputs_real_change_tokenizer/eval_qwen_7b_original_tokenizer_inst_data_1217_test_wheel \
80
+ --split test \
81
+ --max_search_limit 5 \
82
+ --max_turn 10 \
83
+ --top_k 5 \
84
+ --max_doc_len 3000 \
85
+ --subset_num 3 \
86
+ --model_path "/opt/aps/workdir/output/sft_use_original_tokenizer/qwen_7b_original_tokenizer_inst_data_1217" \
87
+ --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
88
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
89
+ --model_doc_reason_path "/capacity/userdata/models/Qwen2.5-32B-Instruct" \
90
+ --bing_endpoint "https://google.serper.dev/search" \
91
+ --openai_api_base "http://localhost:8001/v1"
deep_search/search_o1/run_eval_2.sh ADDED
@@ -0,0 +1,52 @@
1
+ export http_proxy=http://127.0.0.1:7880
2
+ export https_proxy=http://127.0.0.1:7880
3
+
4
+ export CUDA_VISIBLE_DEVICES=5
5
+
6
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/reason_two_model_1.py \
7
+ # --dataset_name hotpotqa \
8
+ # --cache_dir_base cache/eval \
9
+ # --output_dir_base outputs/eval_qwen_2_5_7b \
10
+ # --split test \
11
+ # --max_search_limit 5 \
12
+ # --max_turn 10 \
13
+ # --top_k 5 \
14
+ # --max_doc_len 3000 \
15
+ # --subset_num 100 \
16
+ # --model_path "/opt/aps/workdir/output/checkpoint/qwen_2_5_7b_test" \
17
+ # --model_doc_reason_path "/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32" \
18
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
19
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
20
+ # --bing_endpoint "https://google.serper.dev/search"
21
+
22
+ # Test the vLLM-serve + offline-inference code path
23
+ /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/reason_two_model_2.py \
24
+ --dataset_name hotpotqa \
25
+ --cache_dir_base cache/test_reason_two_model_2 \
26
+ --output_dir_base outputs/test_reason_two_model_2 \
27
+ --split test \
28
+ --max_search_limit 5 \
29
+ --max_turn 10 \
30
+ --top_k 5 \
31
+ --max_doc_len 3000 \
32
+ --subset_num 5 \
33
+ --model_path "/opt/aps/workdir/output/checkpoint/qwen_7b_inst_data_1217" \
34
+ --model_doc_reason_path "/opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct" \
35
+ --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
36
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
37
+ --bing_endpoint "https://google.serper.dev/search"
38
+
39
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
40
+ # --dataset_name hotpotqa \
41
+ # --cache_dir_base cache/eval \
42
+ # --output_dir_base outputs/eval_original_qwen_7b_inst \
43
+ # --split test \
44
+ # --max_search_limit 5 \
45
+ # --max_turn 10 \
46
+ # --top_k 5 \
47
+ # --max_doc_len 3000 \
48
+ # --subset_num 100 \
49
+ # --model_path "/opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct" \
50
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
51
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
52
+ # --bing_endpoint "https://google.serper.dev/search"
deep_search/search_o1/run_eval_3.sh ADDED
@@ -0,0 +1,220 @@
1
+ export http_proxy=http://127.0.0.1:7880
2
+ export https_proxy=http://127.0.0.1:7880
3
+
4
+ export CUDA_VISIBLE_DEVICES=2,3
5
+
6
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/reason_two_model_1.py \
7
+ # --dataset_name hotpotqa \
8
+ # --cache_dir_base cache/eval \
9
+ # --output_dir_base outputs/eval_qwen_2_5_7b \
10
+ # --split test \
11
+ # --max_search_limit 5 \
12
+ # --max_turn 10 \
13
+ # --top_k 5 \
14
+ # --max_doc_len 3000 \
15
+ # --subset_num 100 \
16
+ # --model_path "/opt/aps/workdir/output/checkpoint/qwen_2_5_7b_test" \
17
+ # --model_doc_reason_path "/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32" \
18
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
19
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
20
+ # --bing_endpoint "https://google.serper.dev/search"
21
+
22
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
23
+ # --dataset_name hotpotqa \
24
+ # --cache_dir_base cache_modify_extract_answer/eval \
25
+ # --output_dir_base outputs_modify_extract_answer/eval_qwen_2_5_32b_data_1217 \
26
+ # --split test \
27
+ # --max_search_limit 5 \
28
+ # --max_turn 10 \
29
+ # --top_k 5 \
30
+ # --max_doc_len 3000 \
31
+ # --subset_num 100 \
32
+ # --model_path "/opt/aps/workdir/output/checkpoint/qwen_2_5_32b_data_1217" \
33
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
34
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
35
+ # --bing_endpoint "https://google.serper.dev/search"
36
+
37
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
38
+ # --dataset_name hotpotqa \
39
+ # --cache_dir_base cache_modify_extract_answer/eval \
40
+ # --output_dir_base outputs_modify_extract_answer/eval_original_qwen_2_5_32b \
41
+ # --split test \
42
+ # --max_search_limit 5 \
43
+ # --max_turn 10 \
44
+ # --top_k 5 \
45
+ # --max_doc_len 3000 \
46
+ # --subset_num 100 \
47
+ # --model_path "/capacity/userdata/models/Qwen2.5-32B-Instruct" \
48
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
49
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
50
+ # --bing_endpoint "https://google.serper.dev/search"
51
+
52
+
53
+
54
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
55
+ # --dataset_name hotpotqa \
56
+ # --cache_dir_base cache_modify_extract_answer/eval \
57
+ # --output_dir_base outputs_modify_extract_answer/eval_qwen_7b_inst_data_1217 \
58
+ # --split test \
59
+ # --max_search_limit 5 \
60
+ # --max_turn 10 \
61
+ # --top_k 5 \
62
+ # --max_doc_len 3000 \
63
+ # --subset_num 100 \
64
+ # --model_path "/opt/aps/workdir/output/checkpoint/qwen_7b_inst_data_1217" \
65
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
66
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
67
+ # --bing_endpoint "https://google.serper.dev/search"
68
+
69
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
70
+ # --dataset_name hotpotqa \
71
+ # --cache_dir_base cache_modify_extract_answer/eval \
72
+ # --output_dir_base outputs_modify_extract_answer/eval_original_qwen_7b_inst \
73
+ # --split test \
74
+ # --max_search_limit 5 \
75
+ # --max_turn 10 \
76
+ # --top_k 5 \
77
+ # --max_doc_len 3000 \
78
+ # --subset_num 100 \
79
+ # --model_path "/opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct" \
80
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
81
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
82
+ # --bing_endpoint "https://google.serper.dev/search"
83
+
84
+ # qwen32b
85
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
86
+ # --dataset_name hotpotqa \
87
+ # --cache_dir_base cache_eval/eval_hotpotqa_dev_500 \
88
+ # --output_dir_base output_eval/eval_hopotqa_dev_500_original_qwen_32b_inst \
89
+ # --split test \
90
+ # --max_search_limit 5 \
91
+ # --max_turn 10 \
92
+ # --top_k 5 \
93
+ # --max_doc_len 3000 \
94
+ # --subset_num 500 \
95
+ # --model_path "/capacity/userdata/models/Qwen2.5-32B-Instruct" \
96
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
97
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
98
+ # --bing_endpoint "https://google.serper.dev/search"
99
+
100
+
101
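+ # Sweep the SFT'd Qwen-32B checkpoint over three multi-hop QA dev sets, 100 samples each.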
+ datasets=('2wiki' 'hotpotqa' 'musique')
102
+
103
+
104
+ for dataset in "${datasets[@]}"; do
105
+ echo "Running evaluation for dataset: $dataset"
106
+
107
+ cache_dir_base="cache_eval/eval_${dataset}_dev_500_1"
108
+ output_dir_base="output_eval/eval_${dataset}_dev_500_qwen_32b_original_tokenizer_inst_data_1217_1"
109
+
110
+ mkdir -p "$cache_dir_base"
111
+ mkdir -p "$output_dir_base"
112
+
113
+ /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
114
+ --dataset_name "$dataset" \
115
+ --cache_dir_base "$cache_dir_base" \
116
+ --output_dir_base "$output_dir_base" \
117
+ --split test \
118
+ --max_search_limit 5 \
119
+ --max_turn 10 \
120
+ --top_k 5 \
121
+ --max_doc_len 3000 \
122
+ --subset_num 100 \
123
+ --model_path "/opt/aps/workdir/output/sft_use_original_tokenizer/qwen_32b_original_tokenizer_inst_data_1217" \
124
+ --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
125
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
126
+ --bing_endpoint "https://google.serper.dev/search"
127
+ done
128
+
129
+
130
+ /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
131
+ --dataset_name hotpotqa \
132
+ --cache_dir_base cache_eval/eval_hotpotqa_dev_500_1 \
133
+ --output_dir_base output_eval/eval_hopotqa_dev_500_qwen_32b_original_tokenizer_inst_data_1217_1 \
134
+ --split test \
135
+ --max_search_limit 5 \
136
+ --max_turn 10 \
137
+ --top_k 5 \
138
+ --max_doc_len 3000 \
139
+ --subset_num 500 \
140
+ --model_path "/opt/aps/workdir/output/sft_use_original_tokenizer/qwen_32b_original_tokenizer_inst_data_1217" \
141
+ --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
142
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
143
+ --bing_endpoint "https://google.serper.dev/search"
144
+
145
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
146
+ # --dataset_name hotpotqa \
147
+ # --cache_dir_base cache_eval/eval_hotpotqa_dev_500 \
148
+ # --output_dir_base output_eval/eval_hopotqa_dev_500_original_r1_32b \
149
+ # --split test \
150
+ # --max_search_limit 5 \
151
+ # --max_turn 10 \
152
+ # --top_k 5 \
153
+ # --max_doc_len 3000 \
154
+ # --subset_num 500 \
155
+ # --model_path "/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32" \
156
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
157
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
158
+ # --bing_endpoint "https://google.serper.dev/search"
159
+
160
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
161
+ # --dataset_name hotpotqa \
162
+ # --cache_dir_base cache_eval/eval_hotpotqa_dev_500 \
163
+ # --output_dir_base output_eval/eval_hopotqa_dev_500_original_qwen_7b_inst \
164
+ # --split test \
165
+ # --max_search_limit 5 \
166
+ # --max_turn 10 \
167
+ # --top_k 5 \
168
+ # --max_doc_len 3000 \
169
+ # --subset_num 500 \
170
+ # --model_path "/opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct" \
171
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
172
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
173
+ # --bing_endpoint "https://google.serper.dev/search"
174
+
175
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
176
+ # --dataset_name hotpotqa \
177
+ # --cache_dir_base cache_modify_extract_answer/eval \
178
+ # --output_dir_base outputs_modify_extract_answer/eval_original_qwq \
179
+ # --split test \
180
+ # --max_search_limit 5 \
181
+ # --max_turn 10 \
182
+ # --top_k 5 \
183
+ # --max_doc_len 3000 \
184
+ # --subset_num 100 \
185
+ # --model_path "/capacity/userdata/models/QwQ-32B-Preview" \
186
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
187
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
188
+ # --bing_endpoint "https://google.serper.dev/search"
189
+
190
+
191
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
192
+ # --dataset_name hotpotqa \
193
+ # --cache_dir_base cache/eval \
194
+ # --output_dir_base outputs/eval_llama3_8b \
195
+ # --split test \
196
+ # --max_search_limit 5 \
197
+ # --max_turn 10 \
198
+ # --top_k 5 \
199
+ # --max_doc_len 3000 \
200
+ # --subset_num 100 \
201
+ # --model_path "/opt/aps/workdir/output/checkpoint/llama3_8b_data_1217" \
202
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
203
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
204
+ # --bing_endpoint "https://google.serper.dev/search"
205
+
206
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_eval.py \
207
+ # --dataset_name hotpotqa \
208
+ # --cache_dir_base cache/eval \
209
+ # --output_dir_base outputs/eval_original_llama3_8b \
210
+ # --split test \
211
+ # --max_search_limit 5 \
212
+ # --max_turn 10 \
213
+ # --top_k 5 \
214
+ # --max_doc_len 3000 \
215
+ # --subset_num 100 \
216
+ # --model_path "/capacity/userdata/models/QwQ-32B-Preview" \
217
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
218
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
219
+ # --bing_endpoint "https://google.serper.dev/search"
220
+
deep_search/search_o1/run_eval_benchmark.sh ADDED
@@ -0,0 +1,691 @@
1
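+ # Benchmark evaluation launches for search_o1_sum_all_webpage.py: each nohup run pins two GPUs and evaluates one checkpoint on one dataset (eval / musique_syn / aime / new).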
+ export CUDA_VISIBLE_DEVICES=0,1
2
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
3
+ --dataset_name eval \
4
+ --cache_dir_base cache_eval_sum_all_webpage \
5
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_ckpt176 \
6
+ --split test \
7
+ --max_search_limit 10 \
8
+ --max_turn 10 \
9
+ --top_k 10 \
10
+ --max_doc_len 5000 \
11
+ --model_path "/share/project/sunshuang/deep_search/search_o1/model/checkpoint-176" \
12
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
13
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_ckpt176.log 2>&1 &
14
+
15
+ export CUDA_VISIBLE_DEVICES=2,3
16
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
17
+ --dataset_name eval \
18
+ --cache_dir_base cache_eval_sum_all_webpage \
19
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_temp_0 \
20
+ --split test \
21
+ --max_search_limit 10 \
22
+ --max_turn 10 \
23
+ --top_k 10 \
24
+ --max_doc_len 5000 \
25
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
26
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
27
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_temp_0.log 2>&1 &
28
+
29
+ export CUDA_VISIBLE_DEVICES=2,3
30
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
31
+ --dataset_name eval \
32
+ --cache_dir_base cache_eval_sum_all_webpage \
33
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_temp_0 \
34
+ --split test \
35
+ --max_search_limit 10 \
36
+ --max_turn 10 \
37
+ --top_k 10 \
38
+ --max_doc_len 5000 \
39
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
40
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
41
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_temp_0.log 2>&1 &
42
+
43
+
44
+ # /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-138
45
+
46
+ export CUDA_VISIBLE_DEVICES=4,5
47
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
48
+ --dataset_name eval \
49
+ --cache_dir_base cache_eval_sum_all_webpage \
50
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_sft_1482_ckpt138 \
51
+ --split test \
52
+ --max_search_limit 10 \
53
+ --max_turn 10 \
54
+ --top_k 10 \
55
+ --max_doc_len 5000 \
56
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-138" \
57
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
58
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_sft_1482_ckpt138.log 2>&1 &
59
+
60
+
61
+
62
+ export CUDA_VISIBLE_DEVICES=4,5
63
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
64
+ --dataset_name eval \
65
+ --cache_dir_base cache_eval_sum_all_webpage \
66
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_sft_1482_ckpt138 \
67
+ --split test \
68
+ --max_search_limit 10 \
69
+ --max_turn 10 \
70
+ --top_k 10 \
71
+ --max_doc_len 5000 \
72
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-138" \
73
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
74
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_sft_1482_ckpt138.log 2>&1 &
75
+
76
+
77
+ # /share/project/sunshuang/deep_search/output/JOB:20762#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-138
78
+
79
+ export CUDA_VISIBLE_DEVICES=0,1
80
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
81
+ --dataset_name eval \
82
+ --cache_dir_base cache_eval_sum_all_webpage \
83
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_r132b_sft_1478_ckpt138 \
84
+ --split test \
85
+ --max_search_limit 10 \
86
+ --max_turn 10 \
87
+ --top_k 10 \
88
+ --max_doc_len 5000 \
89
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:20762#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-138" \
90
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
91
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_r132b_sft_1478_ckpt138.log 2>&1 &
92
+
93
+ export CUDA_VISIBLE_DEVICES=2,3
94
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
95
+ --dataset_name eval \
96
+ --cache_dir_base cache_eval_sum_all_webpage \
97
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_r132b \
98
+ --split test \
99
+ --max_search_limit 10 \
100
+ --max_turn 10 \
101
+ --top_k 10 \
102
+ --max_doc_len 5000 \
103
+ --model_path "/share/project/zhipengchen/model/DeepSeek-R1-Distill-Qwen-32B" \
104
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
105
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_r132b.log 2>&1 &
106
+
107
+
108
+
109
+ export CUDA_VISIBLE_DEVICES=2,3
110
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
111
+ --dataset_name eval \
112
+ --cache_dir_base cache_eval_sum_all_webpage \
113
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_difficult_rollout_data_sft_add_prompt_ckpt123 \
114
+ --split test \
115
+ --max_search_limit 10 \
116
+ --max_turn 10 \
117
+ --top_k 10 \
118
+ --max_doc_len 5000 \
119
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:25969#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_difficult_rollout_data_sft_add_prompt.json/checkpoint-123" \
120
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
121
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_difficult_rollout_data_sft_add_prompt_ckpt123.log 2>&1 &
122
+
123
+
124
+
125
+ export CUDA_VISIBLE_DEVICES=0,1
126
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
127
+ --dataset_name eval \
128
+ --cache_dir_base cache_eval_sum_all_webpage \
129
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_difficult_rollout_data_sft_add_prompt_ckpt246 \
130
+ --split test \
131
+ --max_search_limit 10 \
132
+ --max_turn 10 \
133
+ --top_k 10 \
134
+ --max_doc_len 5000 \
135
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:25969#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_difficult_rollout_data_sft_add_prompt.json/checkpoint-246" \
136
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
137
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_difficult_rollout_data_sft_add_prompt_ckpt246.log 2>&1 &
138
+
139
+
140
+
141
+ ######################
142
+ # Test the models with the latest configuration
143
+ export CUDA_VISIBLE_DEVICES=0,1
144
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
145
+ --dataset_name eval \
146
+ --cache_dir_base cache_eval_sum_all_webpage \
147
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_new_setting \
148
+ --split test \
149
+ --max_search_limit 10 \
150
+ --max_turn 10 \
151
+ --top_k 10 \
152
+ --max_doc_len 5000 \
153
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
154
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
155
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_new_setting.log 2>&1 &
156
+
157
+
158
+ export CUDA_VISIBLE_DEVICES=2,3
159
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
160
+ --dataset_name eval \
161
+ --cache_dir_base cache_eval_sum_all_webpage \
162
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_r1_32b_new_setting \
163
+ --split test \
164
+ --max_search_limit 10 \
165
+ --max_turn 10 \
166
+ --top_k 10 \
167
+ --max_doc_len 5000 \
168
+ --model_path "/share/project/zhipengchen/model/DeepSeek-R1-Distill-Qwen-32B" \
169
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
170
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_r1_32b_new_setting.log 2>&1 &
171
+
172
+ export CUDA_VISIBLE_DEVICES=4,5
173
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
174
+ --dataset_name eval \
175
+ --cache_dir_base cache_eval_sum_all_webpage \
176
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_setting \
177
+ --split test \
178
+ --max_search_limit 10 \
179
+ --max_turn 10 \
180
+ --top_k 10 \
181
+ --max_doc_len 5000 \
182
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-32B-Instruct" \
183
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
184
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwen32b_inst_new_setting.log 2>&1 &
185
+
186
+
187
+ # /share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-138
188
+ export CUDA_VISIBLE_DEVICES=6,7
189
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
190
+ --dataset_name eval \
191
+ --cache_dir_base cache_eval_sum_all_webpage \
192
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/138/eval_new_setting \
193
+ --split test \
194
+ --max_search_limit 10 \
195
+ --max_turn 10 \
196
+ --top_k 10 \
197
+ --max_doc_len 5000 \
198
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/checkpoint-138" \
199
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
200
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16816#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_correct_solutions_sft_add_prompt.json/138/eval_new_setting/inf.log 2>&1 &
201
+
202
+ # /share/project/sunshuang/deep_search/search_o1/model/checkpoint-176
203
+ export CUDA_VISIBLE_DEVICES=6,7
204
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
205
+ --dataset_name eval \
206
+ --cache_dir_base cache_eval_sum_all_webpage \
207
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_ckpt176_new_setting \
208
+ --split test \
209
+ --max_search_limit 10 \
210
+ --max_turn 10 \
211
+ --top_k 10 \
212
+ --max_doc_len 5000 \
213
+ --model_path "/share/project/sunshuang/deep_search/search_o1/model/checkpoint-176" \
214
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
215
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/sum_all_webpage_ckpt176_new_setting.log 2>&1 &
216
+
217
+ export CUDA_VISIBLE_DEVICES=2,3
218
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
219
+ --dataset_name eval \
220
+ --cache_dir_base cache_eval_sum_all_webpage \
221
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_eval \
222
+ --split test \
223
+ --max_search_limit 10 \
224
+ --max_turn 10 \
225
+ --top_k 10 \
226
+ --max_doc_len 5000 \
227
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-7B-Instruct" \
228
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
229
+ --bing_endpoint "https://google.serper.dev/search" > output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_eval/inf.log 2>&1 &
230
+
231
+
232
+ export CUDA_VISIBLE_DEVICES=6,7
233
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_demo.py \
234
+ --dataset_name eval \
235
+ --cache_dir_base cache_eval_sum_all_webpage \
236
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/eval_wiki \
237
+ --split test \
238
+ --max_search_limit 10 \
239
+ --max_turn 10 \
240
+ --top_k 10 \
241
+ --max_doc_len 5000 \
242
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-7B-Instruct" \
243
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
244
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/eval_wiki/inf.log 2>&1
245
+
246
+ ##################################################
247
+ # musique_syn
248
+ export CUDA_VISIBLE_DEVICES=6,7
249
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
250
+ --dataset_name musique_syn \
251
+ --cache_dir_base cache_eval_sum_all_webpage \
252
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/testt \
253
+ --split test \
254
+ --max_search_limit 10 \
255
+ --max_turn 10 \
256
+ --top_k 10 \
257
+ --max_doc_len 5000 \
258
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-7B-Instruct" \
259
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
260
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/eval_wiki/test.log 2>&1 &
261
+
262
+
263
+ ##########################
264
+ # aime
265
+ export CUDA_VISIBLE_DEVICES=0,1
266
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
267
+ --dataset_name aime \
268
+ --cache_dir_base cache_eval_sum_all_webpage \
269
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/aime \
270
+ --split test \
271
+ --max_search_limit 10 \
272
+ --max_turn 10 \
273
+ --top_k 10 \
274
+ --max_doc_len 5000 \
275
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-73" \
276
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
277
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:7405#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft/73/aime/inf.log 2>&1 &
278
+
279
+
280
+
281
+
282
+ export CUDA_VISIBLE_DEVICES=2,3
283
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
284
+ --dataset_name aime \
285
+ --cache_dir_base cache_eval_sum_all_webpage \
286
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/91/aime \
287
+ --split test \
288
+ --max_search_limit 10 \
289
+ --max_turn 10 \
290
+ --top_k 10 \
291
+ --max_doc_len 5000 \
292
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91" \
293
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
294
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:5303#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft/91/aime/inf.log 2>&1 &
295
+
296
+ export CUDA_VISIBLE_DEVICES=4,5
297
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
298
+ --dataset_name aime \
299
+ --cache_dir_base cache_eval_sum_all_webpage \
300
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/aime \
301
+ --split test \
302
+ --max_search_limit 10 \
303
+ --max_turn 10 \
304
+ --top_k 10 \
305
+ --max_doc_len 5000 \
306
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/checkpoint-91" \
307
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
308
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11354#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4_new_instruction_2k_sft/91/aime/inf.log 2>&1 &
309
+
310
+
311
+ export CUDA_VISIBLE_DEVICES=6,7
312
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
313
+ --dataset_name aime \
314
+ --cache_dir_base cache_eval_sum_all_webpage \
315
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_aime \
316
+ --split test \
317
+ --max_search_limit 10 \
318
+ --max_turn 10 \
319
+ --top_k 10 \
320
+ --max_doc_len 5000 \
321
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
322
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
323
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/sum_all_webpage_qwq_new_setting_qwq.log 2>&1 &
324
+
325
+
326
+
327
+ export CUDA_VISIBLE_DEVICES=6,7
328
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
329
+ --dataset_name aime \
330
+ --cache_dir_base cache_eval_sum_all_webpage \
331
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_aime \
332
+ --split test \
333
+ --max_search_limit 10 \
334
+ --max_turn 10 \
335
+ --top_k 10 \
336
+ --max_doc_len 5000 \
337
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
338
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
339
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/sum_all_webpage_qwq_new_setting_qwq.log 2>&1 &
340
+
341
+
342
+
343
+ # /share/project/sunshuang/deep_search/output/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73
344
+ export CUDA_VISIBLE_DEVICES=6,7
345
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
346
+ --dataset_name aime \
347
+ --cache_dir_base cache_eval_sum_all_webpage \
348
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/154/aime \
349
+ --split test \
350
+ --max_search_limit 10 \
351
+ --max_turn 10 \
352
+ --top_k 10 \
353
+ --max_doc_len 5000 \
354
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/checkpoint-154" \
355
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
356
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/154/aime/inf.log 2>&1 &
357
+
358
+
359
+
360
+ export CUDA_VISIBLE_DEVICES=6,7
361
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
362
+ --dataset_name aime \
363
+ --cache_dir_base cache_eval_sum_all_webpage \
364
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/aime \
365
+ --split test \
366
+ --max_search_limit 10 \
367
+ --max_turn 10 \
368
+ --top_k 10 \
369
+ --max_doc_len 5000 \
370
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91" \
371
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
372
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/aime/inf.log 2>&1 &
373
+
374
+ export CUDA_VISIBLE_DEVICES=4,5
375
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
376
+ --dataset_name aime \
377
+ --cache_dir_base cache_eval_sum_all_webpage \
378
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_aime \
379
+ --split test \
380
+ --max_search_limit 10 \
381
+ --max_turn 10 \
382
+ --top_k 10 \
383
+ --max_doc_len 5000 \
384
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-7B-Instruct" \
385
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
386
+ --bing_endpoint "https://google.serper.dev/search" > output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_aime/inf.log 2>&1 &
387
+
388
+
389
+
390
+
391
+ export CUDA_VISIBLE_DEVICES=2,3
392
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
393
+ --dataset_name aime \
394
+ --cache_dir_base cache_eval_sum_all_webpage \
395
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/aime \
396
+ --split test \
397
+ --max_search_limit 10 \
398
+ --max_turn 10 \
399
+ --top_k 10 \
400
+ --max_doc_len 5000 \
401
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/checkpoint-228" \
402
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
403
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/aime/inf.log 2>&1 &
404
+
405
+
406
+ ######################
407
+ # Test on Chinese data
408
+ export CUDA_VISIBLE_DEVICES=0,1
409
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
410
+ --dataset_name new \
411
+ --cache_dir_base cache_eval_sum_all_webpage \
412
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/new \
413
+ --split test \
414
+ --max_search_limit 10 \
415
+ --max_turn 10 \
416
+ --top_k 10 \
417
+ --max_doc_len 5000 \
418
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73" \
419
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
420
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/new/inf.log 2>&1 &
421
+
422
+
423
+ export CUDA_VISIBLE_DEVICES=2,3
424
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
425
+ --dataset_name new \
426
+ --cache_dir_base cache_eval_sum_all_webpage \
427
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_new_setting_new \
428
+ --split test \
429
+ --max_search_limit 10 \
430
+ --max_turn 10 \
431
+ --top_k 10 \
432
+ --max_doc_len 5000 \
433
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
434
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
435
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_new_setting_new.log 2>&1 &
436
+
437
+
438
+
439
+
440
+ export CUDA_VISIBLE_DEVICES=4,5
441
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
442
+ --dataset_name new \
443
+ --cache_dir_base cache_eval_sum_all_webpage \
444
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/154/new \
445
+ --split test \
446
+ --max_search_limit 10 \
447
+ --max_turn 10 \
448
+ --top_k 10 \
449
+ --max_doc_len 5000 \
450
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/checkpoint-154" \
451
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
452
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/154/new/inf.log 2>&1 &
453
+
454
+
455
+
456
+
457
+ export CUDA_VISIBLE_DEVICES=6,7
458
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
459
+ --dataset_name new \
460
+ --cache_dir_base cache_eval_sum_all_webpage \
461
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_setting_new \
462
+ --split test \
463
+ --max_search_limit 10 \
464
+ --max_turn 10 \
465
+ --top_k 10 \
466
+ --max_doc_len 5000 \
467
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-32B-Instruct" \
468
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
469
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwen32b_inst_new_setting_new.log 2>&1 &
470
+
471
+
472
+
473
+ #############
474
+ # Manually synthesized data
475
+ export CUDA_VISIBLE_DEVICES=0,1
476
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
477
+ --dataset_name syn_zh \
478
+ --cache_dir_base cache_eval_sum_all_webpage \
479
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_zh \
480
+ --split test \
481
+ --max_search_limit 10 \
482
+ --max_turn 10 \
483
+ --top_k 10 \
484
+ --max_doc_len 5000 \
485
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73" \
486
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
487
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/syn_zh/inf.log 2>&1 &
488
+
489
+
490
+ export CUDA_VISIBLE_DEVICES=2,3
491
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
492
+ --dataset_name syn_zh \
493
+ --cache_dir_base cache_eval_sum_all_webpage \
494
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq_new_setting_syn_zh \
495
+ --split test \
496
+ --max_search_limit 10 \
497
+ --max_turn 10 \
498
+ --top_k 10 \
499
+ --max_doc_len 5000 \
500
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
501
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
502
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq_new_setting_syn_zh.log 2>&1 &
503
+
504
+
505
+
506
+
507
+ export CUDA_VISIBLE_DEVICES=4,5
508
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
509
+ --dataset_name syn_zh \
510
+ --cache_dir_base cache_eval_sum_all_webpage \
511
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/154/syn_zh \
512
+ --split test \
513
+ --max_search_limit 10 \
514
+ --max_turn 10 \
515
+ --top_k 10 \
516
+ --max_doc_len 5000 \
517
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/checkpoint-154" \
518
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
519
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/logs/log_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/154/syn_zh/inf.log 2>&1 &
520
+
521
+
522
+ export CUDA_VISIBLE_DEVICES=0,1
523
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
524
+ --dataset_name syn_zh \
525
+ --cache_dir_base cache_eval_sum_all_webpage \
526
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/syn_zh \
527
+ --split test \
528
+ --max_search_limit 10 \
529
+ --max_turn 10 \
530
+ --top_k 10 \
531
+ --max_doc_len 5000 \
532
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/checkpoint-228" \
533
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
534
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/syn_zh/inf.log 2>&1 &
535
+
536
+
537
+
538
+ export CUDA_VISIBLE_DEVICES=4,5
539
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
540
+ --dataset_name eval \
541
+ --cache_dir_base cache_eval_sum_all_webpage \
542
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/eval_1 \
543
+ --split test \
544
+ --max_search_limit 10 \
545
+ --max_turn 10 \
546
+ --top_k 10 \
547
+ --max_doc_len 5000 \
548
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/checkpoint-228" \
549
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
550
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/eval_1/inf.log 2>&1 &
551
+
552
+
553
+
554
+ # /share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96
555
+ export CUDA_VISIBLE_DEVICES=2,3
556
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
557
+ --dataset_name eval \
558
+ --cache_dir_base cache_eval_sum_all_webpage \
559
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval \
560
+ --split test \
561
+ --max_search_limit 10 \
562
+ --max_turn 10 \
563
+ --top_k 10 \
564
+ --max_doc_len 5000 \
565
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96" \
566
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
567
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/eval/inf.log 2>&1 &
568
+
569
+
570
+ export CUDA_VISIBLE_DEVICES=4,5
571
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
572
+ --dataset_name realqa \
573
+ --cache_dir_base cache_eval_sum_all_webpage \
574
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/realqa \
575
+ --split test \
576
+ --max_search_limit 10 \
577
+ --max_turn 10 \
578
+ --top_k 10 \
579
+ --max_doc_len 5000 \
580
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-96" \
581
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
582
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/96/realqa/inf.log 2>&1 &
583
+
584
+
585
+
586
+ export CUDA_VISIBLE_DEVICES=0,1
587
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
588
+ --dataset_name eval \
589
+ --cache_dir_base cache_eval_sum_all_webpage \
590
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_prompt_eval \
591
+ --split test \
592
+ --max_search_limit 10 \
593
+ --max_turn 10 \
594
+ --top_k 10 \
595
+ --max_doc_len 5000 \
596
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-7B-Instruct" \
597
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
598
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_prompt_eval/inf.log 2>&1 &
599
+
600
+
601
+ export CUDA_VISIBLE_DEVICES=2,3
602
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
603
+ --dataset_name eval \
604
+ --cache_dir_base cache_eval_sum_all_webpage \
605
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_prompt_eval \
606
+ --split test \
607
+ --max_search_limit 10 \
608
+ --max_turn 10 \
609
+ --top_k 10 \
610
+ --max_doc_len 5000 \
611
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-32B-Instruct" \
612
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
613
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_prompt_eval/inf.log 2>&1 &
614
+
615
+
616
+ export CUDA_VISIBLE_DEVICES=2,3
617
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
618
+ --dataset_name eval \
619
+ --cache_dir_base cache_eval_sum_all_webpage \
620
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_prompt_eval \
621
+ --split test \
622
+ --max_search_limit 10 \
623
+ --max_turn 10 \
624
+ --top_k 10 \
625
+ --max_doc_len 5000 \
626
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
627
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
628
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_prompt_eval/inf.log 2>&1 &
629
+
630
+
631
+ #############################
632
+ # date
633
+ export CUDA_VISIBLE_DEVICES=0,1
634
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_date.py \
635
+ --dataset_name eval \
636
+ --cache_dir_base cache_eval_sum_all_webpage_date \
637
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_prompt_eval_date \
638
+ --split test \
639
+ --max_search_limit 10 \
640
+ --max_turn 10 \
641
+ --top_k 10 \
642
+ --max_doc_len 5000 \
643
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
644
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
645
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_prompt_eval_date/inf.log 2>&1 &
646
+
647
+
648
+ export CUDA_VISIBLE_DEVICES=2,3
649
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
650
+ --dataset_name eval \
651
+ --cache_dir_base cache_eval_sum_all_webpage_date \
652
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_prompt_eval_date \
653
+ --split test \
654
+ --max_search_limit 10 \
655
+ --max_turn 10 \
656
+ --top_k 10 \
657
+ --max_doc_len 5000 \
658
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-32B-Instruct" \
659
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
660
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_prompt_eval_date/inf.log 2>&1 &
661
+
662
+
663
+ export CUDA_VISIBLE_DEVICES=4,5
664
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_date.py \
665
+ --dataset_name eval \
666
+ --cache_dir_base cache_eval_sum_all_webpage_date \
667
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/eval_date \
668
+ --split test \
669
+ --max_search_limit 10 \
670
+ --max_turn 10 \
671
+ --top_k 10 \
672
+ --max_doc_len 5000 \
673
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91" \
674
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
675
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/eval_date/inf.log 2>&1 &
676
+
677
+
678
+ export CUDA_VISIBLE_DEVICES=6,7
679
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_date.py \
680
+ --dataset_name eval \
681
+ --cache_dir_base cache_eval_sum_all_webpage_date \
682
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/eval_date \
683
+ --split test \
684
+ --max_search_limit 10 \
685
+ --max_turn 10 \
686
+ --top_k 10 \
687
+ --max_doc_len 5000 \
688
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144" \
689
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
690
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/eval_date/inf.log 2>&1 &
691
+
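Every block above repeats one invocation, varying only the GPU pair, dataset name, checkpoint, and output directory. A minimal launcher sketch that factors out the repetition (not part of the commit; launch_eval, SERPER_KEY, and SERPER_ENDPOINT are illustrative names, and the key/endpoint values are assumed to be exported beforehand):

launch_eval() {
  # usage: launch_eval <gpu_pair> <dataset_name> <model_path> <output_dir>
  local gpus="$1" dataset="$2" model="$3" out="$4"
  mkdir -p "$out"  # the log redirection below fails if the directory is missing
  CUDA_VISIBLE_DEVICES="$gpus" nohup /share/project/miniconda/envs/search_o1/bin/python -u \
    scripts/search_o1_sum_all_webpage.py \
    --dataset_name "$dataset" \
    --cache_dir_base cache_eval_sum_all_webpage \
    --output_dir_base "$out" \
    --split test \
    --max_search_limit 10 \
    --max_turn 10 \
    --top_k 10 \
    --max_doc_len 5000 \
    --model_path "$model" \
    --bing_subscription_key "$SERPER_KEY" \
    --bing_endpoint "$SERPER_ENDPOINT" > "$out/inf.log" 2>&1 &
}
# Example, equivalent to the Qwen2.5-7B-Instruct aime block above:
# launch_eval "4,5" aime /share/project/zhipengchen/model/Qwen2.5-7B-Instruct \
#   output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_aime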
deep_search/search_o1/run_eval_benchmark_date.sh ADDED
@@ -0,0 +1,62 @@
1
+
2
+ #############################
3
+ # date
4
+ export CUDA_VISIBLE_DEVICES=0,1
5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_date.py \
6
+ --dataset_name eval \
7
+ --cache_dir_base cache_eval_sum_all_webpage_date \
8
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_prompt_eval_date \
9
+ --split test \
10
+ --max_search_limit 10 \
11
+ --max_turn 10 \
12
+ --top_k 10 \
13
+ --max_doc_len 5000 \
14
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
15
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
16
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_prompt_eval_date/inf.log 2>&1 &
17
+
18
+
19
+ export CUDA_VISIBLE_DEVICES=2,3
20
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
21
+ --dataset_name eval \
22
+ --cache_dir_base cache_eval_sum_all_webpage_date \
23
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_prompt_eval_date \
24
+ --split test \
25
+ --max_search_limit 10 \
26
+ --max_turn 10 \
27
+ --top_k 10 \
28
+ --max_doc_len 5000 \
29
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-32B-Instruct" \
30
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
31
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_prompt_eval_date/inf.log 2>&1 &
32
+
33
+
34
+ export CUDA_VISIBLE_DEVICES=4,5
35
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_date.py \
36
+ --dataset_name eval \
37
+ --cache_dir_base cache_eval_sum_all_webpage_date \
38
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/eval_date \
39
+ --split test \
40
+ --max_search_limit 10 \
41
+ --max_turn 10 \
42
+ --top_k 10 \
43
+ --max_doc_len 5000 \
44
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91" \
45
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
46
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/91/eval_date/inf.log 2>&1 &
47
+
48
+
49
+ export CUDA_VISIBLE_DEVICES=6,7
50
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_date.py \
51
+ --dataset_name eval \
52
+ --cache_dir_base cache_eval_sum_all_webpage_date \
53
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/eval_date \
54
+ --split test \
55
+ --max_search_limit 10 \
56
+ --max_turn 10 \
57
+ --top_k 10 \
58
+ --max_doc_len 5000 \
59
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144" \
60
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
61
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/144/eval_date/inf.log 2>&1 &
62
+
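The date variant swaps the entry point to scripts/search_o1_sum_all_webpage_date.py while keeping every flag identical; note the Qwen2.5-32B-Instruct block above still calls the plain script, so the variant is chosen per block. A sketch that turns the choice into a single variable (SCRIPT, OUT, MODEL, SERPER_KEY, and SERPER_ENDPOINT are illustrative and assumed to be set):

SCRIPT=scripts/search_o1_sum_all_webpage_date.py  # or scripts/search_o1_sum_all_webpage.py
CUDA_VISIBLE_DEVICES=0,1 nohup /share/project/miniconda/envs/search_o1/bin/python -u "$SCRIPT" \
  --dataset_name eval \
  --cache_dir_base cache_eval_sum_all_webpage_date \
  --output_dir_base "$OUT" \
  --split test \
  --max_search_limit 10 \
  --max_turn 10 \
  --top_k 10 \
  --max_doc_len 5000 \
  --model_path "$MODEL" \
  --bing_subscription_key "$SERPER_KEY" \
  --bing_endpoint "$SERPER_ENDPOINT" > "$OUT/inf.log" 2>&1 &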
deep_search/search_o1/run_eval_benchmark_musique_syn.sh ADDED
@@ -0,0 +1,108 @@
1
+ export CUDA_VISIBLE_DEVICES=0,1
2
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
3
+ --dataset_name musique_syn \
4
+ --cache_dir_base cache_eval_sum_all_webpage \
5
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/musique_syn \
6
+ --split test \
7
+ --max_search_limit 10 \
8
+ --max_turn 10 \
9
+ --top_k 10 \
10
+ --max_doc_len 5000 \
11
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/checkpoint-228" \
12
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
13
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/musique_syn/inf.log 2>&1 &
14
+
15
+ export CUDA_VISIBLE_DEVICES=2,3
16
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
17
+ --dataset_name musique_syn \
18
+ --cache_dir_base cache_eval_sum_all_webpage \
19
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/musique_syn \
20
+ --split test \
21
+ --max_search_limit 10 \
22
+ --max_turn 10 \
23
+ --top_k 10 \
24
+ --max_doc_len 5000 \
25
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-73" \
26
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
27
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:9578#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/73/musique_syn/inf.log 2>&1 &
28
+
29
+
30
+
31
+ # /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/eval_wiki
32
+ export CUDA_VISIBLE_DEVICES=4,5
33
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
34
+ --dataset_name musique_syn \
35
+ --cache_dir_base cache_eval_sum_all_webpage \
36
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/musique_syn \
37
+ --split test \
38
+ --max_search_limit 10 \
39
+ --max_turn 10 \
40
+ --top_k 10 \
41
+ --max_doc_len 5000 \
42
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/checkpoint-91" \
43
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
44
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:15076#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_instruction_2k_sft_remove_exp/91/musique_syn/inf.log 2>&1 &
45
+
46
+
47
+
48
+ export CUDA_VISIBLE_DEVICES=4,5
49
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
50
+ --dataset_name musique_syn \
51
+ --cache_dir_base cache_eval_sum_all_webpage \
52
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_musique_syn \
53
+ --split test \
54
+ --max_search_limit 10 \
55
+ --max_turn 10 \
56
+ --top_k 10 \
57
+ --max_doc_len 5000 \
58
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
59
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
60
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_musique_syn/inf.log 2>&1 &
61
+
62
+
63
+ export CUDA_VISIBLE_DEVICES=0,1
64
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
65
+ --dataset_name musique_syn \
66
+ --cache_dir_base cache_eval_sum_all_webpage \
67
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_musique_syn \
68
+ --split test \
69
+ --max_search_limit 10 \
70
+ --max_turn 10 \
71
+ --top_k 10 \
72
+ --max_doc_len 5000 \
73
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-7B-Instruct" \
74
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
75
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_musique_syn/inf.log 2>&1 &
76
+
77
+
78
+ export CUDA_VISIBLE_DEVICES=2,3
79
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
80
+ --dataset_name musique_syn \
81
+ --cache_dir_base cache_eval_sum_all_webpage \
82
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/musique_syn \
83
+ --split test \
84
+ --max_search_limit 10 \
85
+ --max_turn 10 \
86
+ --top_k 10 \
87
+ --max_doc_len 5000 \
88
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211" \
89
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
90
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/musique_syn/inf.log 2>&1 &
91
+
92
+
93
+
94
+ # musique_syn
95
+ # export CUDA_VISIBLE_DEVICES=6,7
96
+ # nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
97
+ # --dataset_name musique_syn \
98
+ # --cache_dir_base cache_eval_sum_all_webpage \
99
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/testt \
100
+ # --split test \
101
+ # --max_search_limit 10 \
102
+ # --max_turn 10 \
103
+ # --top_k 10 \
104
+ # --max_doc_len 5000 \
105
+ # --model_path "/share/project/zhipengchen/model/Qwen2.5-7B-Instruct" \
106
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
107
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/eval_wiki/test.log 2>&1 &
108
+
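Several of the blocks above reuse the same CUDA_VISIBLE_DEVICES pairs, and launching onto a busy pair contends for GPU memory. A quick pre-flight check (sketch, using standard nvidia-smi and ps options):

# Processes currently holding GPU memory:
nvidia-smi --query-compute-apps=gpu_uuid,pid,process_name,used_memory --format=csv
# Eval runs already launched from this repo (the [s] keeps grep from matching itself):
ps -ef | grep '[s]earch_o1_sum_all_webpage'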
deep_search/search_o1/run_eval_benchmark_realqa.sh ADDED
@@ -0,0 +1,129 @@
1
+
2
+ export CUDA_VISIBLE_DEVICES=0,1
3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
4
+ --dataset_name realqa \
5
+ --cache_dir_base cache_eval_sum_all_webpage \
6
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_realqa \
7
+ --split test \
8
+ --max_search_limit 10 \
9
+ --max_turn 10 \
10
+ --top_k 10 \
11
+ --max_doc_len 5000 \
12
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
13
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
14
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_setting_realqa/inf.log 2>&1 &
15
+
16
+
17
+
18
+ export CUDA_VISIBLE_DEVICES=6,7
19
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
20
+ --dataset_name realqa \
21
+ --cache_dir_base cache_eval_sum_all_webpage \
22
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_realqa \
23
+ --split test \
24
+ --max_search_limit 10 \
25
+ --max_turn 10 \
26
+ --top_k 10 \
27
+ --max_doc_len 5000 \
28
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-7B-Instruct" \
29
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
30
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen7b_inst_new_setting_realqa/inf.log 2>&1 &
31
+
32
+
33
+
34
+ export CUDA_VISIBLE_DEVICES=4,5
35
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
36
+ --dataset_name realqa \
37
+ --cache_dir_base cache_eval_sum_all_webpage \
38
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_setting_realqa \
39
+ --split test \
40
+ --max_search_limit 10 \
41
+ --max_turn 10 \
42
+ --top_k 10 \
43
+ --max_doc_len 5000 \
44
+ --model_path "/share/project/zhipengchen/model/Qwen2.5-32B-Instruct" \
45
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
46
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwen32b_inst_new_setting_realqa/inf.log 2>&1 &
47
+
48
+
49
+ export CUDA_VISIBLE_DEVICES=0,1
50
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
51
+ --dataset_name realqa \
52
+ --cache_dir_base cache_eval_sum_all_webpage \
53
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/realqa \
54
+ --split test \
55
+ --max_search_limit 10 \
56
+ --max_turn 10 \
57
+ --top_k 10 \
58
+ --max_doc_len 5000 \
59
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144" \
60
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
61
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/144/realqa/inf.log 2>&1 &
62
+
63
+
64
+ export CUDA_VISIBLE_DEVICES=2,3
65
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
66
+ --dataset_name realqa \
67
+ --cache_dir_base cache_eval_sum_all_webpage \
68
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/realqa \
69
+ --split test \
70
+ --max_search_limit 10 \
71
+ --max_turn 10 \
72
+ --top_k 10 \
73
+ --max_doc_len 5000 \
74
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144" \
75
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
76
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/realqa/inf.log 2>&1 &
77
+
78
+
79
+
80
+ export CUDA_VISIBLE_DEVICES=4,5
81
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
82
+ --dataset_name realqa \
83
+ --cache_dir_base cache_eval_sum_all_webpage \
84
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/realqa \
85
+ --split test \
86
+ --max_search_limit 10 \
87
+ --max_turn 10 \
88
+ --top_k 10 \
89
+ --max_doc_len 5000 \
90
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144" \
91
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
92
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:29319#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft/144/realqa/inf.log 2>&1 &
93
+
94
+
95
+
96
+
97
+
98
+
99
+
100
+
101
+ export CUDA_VISIBLE_DEVICES=4,5
102
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
103
+ --dataset_name realqa \
104
+ --cache_dir_base cache_eval_sum_all_webpage \
105
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/realqa \
106
+ --split test \
107
+ --max_search_limit 10 \
108
+ --max_turn 10 \
109
+ --top_k 10 \
110
+ --max_doc_len 5000 \
111
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/checkpoint-228" \
112
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
113
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462/228/realqa/inf.log 2>&1 &
114
+
115
+
116
+ export CUDA_VISIBLE_DEVICES=6,7
117
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
118
+ --dataset_name realqa \
119
+ --cache_dir_base cache_eval_sum_all_webpage \
120
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/realqa \
121
+ --split test \
122
+ --max_search_limit 10 \
123
+ --max_turn 10 \
124
+ --top_k 10 \
125
+ --max_doc_len 5000 \
126
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211" \
127
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
128
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/211/realqa/inf.log 2>&1 &
129
+
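With this many detached jobs, recording each PID at launch makes it possible to stop one specific run without grepping ps output. A sketch ($! expands to the PID of the most recent background job; run.pid is an illustrative filename):

OUT=output/output_eval/outputs_sum_all_webpage_qwq_new_setting_realqa  # any output dir from above
# launch the nohup command as in the blocks above, then immediately:
echo $! > "$OUT/run.pid"
# later, to stop exactly that run:
# kill "$(cat "$OUT/run.pid")"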
deep_search/search_o1/run_eval_benchmark_rl.sh ADDED
@@ -0,0 +1,14 @@
1
+ export CUDA_VISIBLE_DEVICES=2,3
2
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
3
+ --dataset_name simpleqa \
4
+ --cache_dir_base cache_eval_sum_all_webpage \
5
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_prompt_eval \
6
+ --split test \
7
+ --max_search_limit 10 \
8
+ --max_turn 10 \
9
+ --top_k 10 \
10
+ --max_doc_len 5000 \
11
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
12
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
13
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval/outputs_sum_all_webpage_qwq_new_prompt_eval/inf.log 2>&1 &
14
+
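Every invocation in these scripts embeds the same Serper key inline, and the --bing_subscription_key/--bing_endpoint flag names are historical: the endpoint actually points at Serper's Google search API. A sketch for keeping the credential out of committed scripts (SERPER_API_KEY and the key-file path are assumptions, not part of the repo):

# once per shell, reading from a file kept outside version control:
export SERPER_API_KEY="$(cat "$HOME/.config/serper/api_key")"
# then each block can pass:
#   --bing_subscription_key "$SERPER_API_KEY" \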
deep_search/search_o1/run_eval_sft_1.sh ADDED
@@ -0,0 +1,89 @@
1
+ # export CUDA_VISIBLE_DEVICES=2,3
2
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
3
+ # --dataset_name eval \
4
+ # --cache_dir_base cache_eval_sum_all_webpage \
5
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_1.1k_91/eval \
6
+ # --split test \
7
+ # --max_search_limit 10 \
8
+ # --max_turn 10 \
9
+ # --top_k 10 \
10
+ # --max_doc_len 5000 \
11
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91" \
12
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
13
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_1.1k_91/eval/inf.log 2>&1
14
+
15
+
16
+ # export CUDA_VISIBLE_DEVICES=2,3
17
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
18
+ # --dataset_name eval \
19
+ # --cache_dir_base cache_eval_sum_all_webpage \
20
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_syn_long_1533/eval \
21
+ # --split test \
22
+ # --max_search_limit 10 \
23
+ # --max_turn 10 \
24
+ # --top_k 10 \
25
+ # --max_doc_len 5000 \
26
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144" \
27
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
28
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_syn_long_1533/eval/inf.log 2>&1
29
+
30
+
31
+
32
+ export CUDA_VISIBLE_DEVICES=2,3
33
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
34
+ --dataset_name gaia \
35
+ --cache_dir_base cache_eval_sum_all_webpage \
36
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_1.5k/context_8k/gaia \
37
+ --split test \
38
+ --max_search_limit 10 \
39
+ --max_turn 10 \
40
+ --top_k 10 \
41
+ --max_doc_len 5000 \
42
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144" \
43
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
44
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_1.5k/context_8k/gaia/inf.log 2>&1
45
+
46
+ export CUDA_VISIBLE_DEVICES=2,3
47
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
48
+ --dataset_name gaia \
49
+ --cache_dir_base cache_eval_sum_all_webpage \
50
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_2.7k/context_8k/gaia \
51
+ --split test \
52
+ --max_search_limit 10 \
53
+ --max_turn 10 \
54
+ --top_k 10 \
55
+ --max_doc_len 5000 \
56
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211" \
57
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
58
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_2.7k/context_8k/gaia/inf.log 2>&1
59
+
60
+
61
+ # export CUDA_VISIBLE_DEVICES=2,3
62
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
63
+ # --dataset_name eval \
64
+ # --cache_dir_base cache_eval_sum_all_webpage \
65
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_1.5k/context_8k/eval \
66
+ # --split test \
67
+ # --max_search_limit 10 \
68
+ # --max_turn 10 \
69
+ # --top_k 10 \
70
+ # --max_doc_len 5000 \
71
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144" \
72
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
73
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_1.5k/context_8k/eval/inf.log 2>&1
74
+
75
+
76
+ # export CUDA_VISIBLE_DEVICES=2,3
77
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
78
+ # --dataset_name frames \
79
+ # --cache_dir_base cache_eval_sum_all_webpage \
80
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_1.5k/context_8k/frames \
81
+ # --split test \
82
+ # --max_search_limit 10 \
83
+ # --max_turn 10 \
84
+ # --top_k 10 \
85
+ # --max_doc_len 5000 \
86
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft/checkpoint-144" \
87
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
88
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_1.5k/context_8k/frames/inf.log 2>&1
89
+
deep_search/search_o1/run_eval_sft_2.sh ADDED
@@ -0,0 +1,106 @@
1
+
2
+ # export CUDA_VISIBLE_DEVICES=4,5
3
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
4
+ # --dataset_name frames \
5
+ # --cache_dir_base cache_eval_sum_all_webpage \
6
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_1.1k_91/frames \
7
+ # --split test \
8
+ # --max_search_limit 10 \
9
+ # --max_turn 10 \
10
+ # --top_k 10 \
11
+ # --max_doc_len 5000 \
12
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91" \
13
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
14
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_1.1k_91/frames/inf.log 2>&1
15
+
16
+
17
+ # export CUDA_VISIBLE_DEVICES=4,5
18
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
19
+ # --dataset_name frames \
20
+ # --cache_dir_base cache_eval_sum_all_webpage \
21
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_syn_long_1533/frames \
22
+ # --split test \
23
+ # --max_search_limit 10 \
24
+ # --max_turn 10 \
25
+ # --top_k 10 \
26
+ # --max_doc_len 5000 \
27
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144" \
28
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
29
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_syn_long_1533/frames/inf.log 2>&1
30
+
31
+
32
+
33
+ # export CUDA_VISIBLE_DEVICES=0,1
34
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
35
+ # --dataset_name eval \
36
+ # --cache_dir_base cache_eval_sum_all_webpage \
37
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_1.1k_91/eval \
38
+ # --split test \
39
+ # --max_search_limit 10 \
40
+ # --max_turn 10 \
41
+ # --top_k 10 \
42
+ # --max_doc_len 5000 \
43
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91" \
44
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
45
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_1.1k_91/eval/inf.log 2>&1
46
+
47
+
48
+ # export CUDA_VISIBLE_DEVICES=0,1
49
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
50
+ # --dataset_name eval \
51
+ # --cache_dir_base cache_eval_sum_all_webpage \
52
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_syn_long_1533/eval \
53
+ # --split test \
54
+ # --max_search_limit 10 \
55
+ # --max_turn 10 \
56
+ # --top_k 10 \
57
+ # --max_doc_len 5000 \
58
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144" \
59
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
60
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_syn_long_1533/eval/inf.log 2>&1
61
+
62
+
63
+
64
+ export CUDA_VISIBLE_DEVICES=0,1
65
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
66
+ --dataset_name gaia \
67
+ --cache_dir_base cache_eval_sum_all_webpage \
68
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_2.7k/context_8k/gaia \
69
+ --split test \
70
+ --max_search_limit 10 \
71
+ --max_turn 10 \
72
+ --top_k 10 \
73
+ --max_doc_len 5000 \
74
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211" \
75
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
76
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_2.7k/context_8k/gaia/inf.log 2>&1
77
+
78
+ # export CUDA_VISIBLE_DEVICES=0,1
79
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
80
+ # --dataset_name eval \
81
+ # --cache_dir_base cache_eval_sum_all_webpage \
82
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_2.7k/context_8k/eval \
83
+ # --split test \
84
+ # --max_search_limit 10 \
85
+ # --max_turn 10 \
86
+ # --top_k 10 \
87
+ # --max_doc_len 5000 \
88
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211" \
89
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
90
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_2.7k/context_8k/eval/inf.log 2>&1
91
+
92
+
93
+ # export CUDA_VISIBLE_DEVICES=0,1
94
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
95
+ # --dataset_name frames \
96
+ # --cache_dir_base cache_eval_sum_all_webpage \
97
+ # --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_2.7k/context_8k/frames \
98
+ # --split test \
99
+ # --max_search_limit 10 \
100
+ # --max_turn 10 \
101
+ # --top_k 10 \
102
+ # --max_doc_len 5000 \
103
+ # --model_path "/share/project/sunshuang/deep_search/output/JOB:17054#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_2462_nq_237_total_2699/checkpoint-211" \
104
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
105
+ # --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_2.7k/context_8k/frames/inf.log 2>&1
106
+
deep_search/search_o1/run_eval_sft_3.sh ADDED
@@ -0,0 +1,30 @@
1
+
2
+ export CUDA_VISIBLE_DEVICES=6,7
3
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
4
+ --dataset_name gaia \
5
+ --cache_dir_base cache_eval_sum_all_webpage \
6
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_1.1k_91/gaia \
7
+ --split test \
8
+ --max_search_limit 10 \
9
+ --max_turn 10 \
10
+ --top_k 10 \
11
+ --max_doc_len 5000 \
12
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft/checkpoint-91" \
13
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
14
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_1.1k_91/gaia/inf.log 2>&1
15
+
16
+
17
+ export CUDA_VISIBLE_DEVICES=6,7
18
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
19
+ --dataset_name gaia \
20
+ --cache_dir_base cache_eval_sum_all_webpage \
21
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_syn_long_1533/gaia \
22
+ --split test \
23
+ --max_search_limit 10 \
24
+ --max_turn 10 \
25
+ --top_k 10 \
26
+ --max_doc_len 5000 \
27
+ --model_path "/share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533/checkpoint-144" \
28
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
29
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_syn_long_1533/gaia/inf.log 2>&1
30
+
deep_search/search_o1/run_eval_sft_sh.sh ADDED
@@ -0,0 +1,5 @@
1
+ nohup bash /share/project/sunshuang/deep_search/search_o1/run_eval_sft_1.sh > /share/project/sunshuang/deep_search/search_o1/run_eval_sft_1.log 2>&1 &
2
+
3
+ nohup bash /share/project/sunshuang/deep_search/search_o1/run_eval_sft_2.sh > /share/project/sunshuang/deep_search/search_o1/run_eval_sft_2.log 2>&1 &
4
+
5
+ nohup bash /share/project/sunshuang/deep_search/search_o1/run_eval_sft_3.sh > /share/project/sunshuang/deep_search/search_o1/run_eval_sft_3.log 2>&1 &
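Each sub-script above runs its evals sequentially (their python commands carry no trailing &), so this driver produces three parallel streams of sequential jobs. A sketch of a driver that also blocks until all three streams finish (paths shortened to the script names):

nohup bash run_eval_sft_1.sh > run_eval_sft_1.log 2>&1 &
nohup bash run_eval_sft_2.sh > run_eval_sft_2.log 2>&1 &
nohup bash run_eval_sft_3.sh > run_eval_sft_3.log 2>&1 &
wait  # returns only after every background job above has exited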
deep_search/search_o1/run_eval_sft_train.sh ADDED
@@ -0,0 +1,13 @@
1
+ export CUDA_VISIBLE_DEVICES=0,1,6,7
2
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
3
+ --dataset_name no_error_data_871 \
4
+ --cache_dir_base cache_eval_sum_all_webpage \
5
+ --output_dir_base /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwen7b_inst_sft_871/78/no_error_data_871 \
6
+ --split test \
7
+ --max_search_limit 10 \
8
+ --max_turn 10 \
9
+ --top_k 10 \
10
+ --max_doc_len 5000 \
11
+ --model_path "/share/project/sunshuang/deep_search/q1/JOB:2569#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_new_800/checkpoint-41" \
12
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
13
+ --bing_endpoint "https://google.serper.dev/search" > /share/project/sunshuang/deep_search/search_o1/output/output_eval_sft/qwen7b_inst_sft_871/78/no_error_data_871/inf.log 2>&1 &
deep_search/search_o1/run_gen_data_1.sh ADDED
@@ -0,0 +1,36 @@
1
+ export http_proxy=http://127.0.0.1:7880
2
+ export https_proxy=http://127.0.0.1:7880
3
+
4
+ export CUDA_VISIBLE_DEVICES=4,5
5
+
6
+ /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_5.py \
7
+ --dataset_name hotpotqa \
8
+ --cache_dir_base cache_eval_hotpotqa \
9
+ --output_dir_base outputs_eval_hotpotqa \
10
+ --split test \
11
+ --max_search_limit 5 \
12
+ --max_turn 10 \
13
+ --top_k 5 \
14
+ --max_doc_len 3000 \
15
+ --batch_size 5000 \
16
+ --subset_num 100 \
17
+ --model_path "/opt/aps/workdir/output/checkpoint/debug_1" \
18
+ --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
19
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
20
+ --bing_endpoint "https://google.serper.dev/search"
21
+
22
+ # /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_5.py \
23
+ # --dataset_name hotpotqa \
24
+ # --cache_dir_base cache_eval_hotpotqa_1 \
25
+ # --output_dir_base outputs_eval_hotpotqa_1 \
26
+ # --split test \
27
+ # --max_search_limit 5 \
28
+ # --max_turn 10 \
29
+ # --top_k 5 \
30
+ # --max_doc_len 3000 \
31
+ # --batch_size 5000 \
32
+ # --subset_num 500 \
33
+ # --model_path "/capacity/userdata/models/Qwen2.5-7B" \
34
+ # --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" \
35
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
36
+ # --bing_endpoint "https://google.serper.dev/search"
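The proxy exports at the top of this script apply to every subsequent command in the shell session, not only the run that needs them. To scope the proxy to a single invocation, prefix assignments work (sketch; the remaining flags are the same ones listed in the block above):

http_proxy=http://127.0.0.1:7880 https_proxy=http://127.0.0.1:7880 \
  /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/run_search_o1_5.py \
  --dataset_name hotpotqa  # plus the other flags shown in the block above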
deep_search/search_o1/run_gen_data_for_simple_qa.sh ADDED
@@ -0,0 +1,379 @@
+ export http_proxy=http://127.0.0.1:7880
+ export https_proxy=http://127.0.0.1:7880
+
+ export CUDA_VISIBLE_DEVICES=2,3
+
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_single_page.py \
+ # --dataset_name 2wiki \
+ # --cache_dir_base cache \
+ # --output_dir_base output/outputs_sum_single_page_2wiki_ckpt176 \
+ # --split test \
+ # --max_search_limit 10 \
+ # --max_turn 10 \
+ # --top_k 10 \
+ # --max_doc_len 5000 \
+ # --subset_num 100 \
+ # --model_path "/share/project/sunshuang/deep_research/search_o1/model/checkpoint-176" \
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ # --bing_endpoint "https://google.serper.dev/search"
+
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name bamboogle \
+ --cache_dir_base cache \
+ --output_dir_base output/outputs_sum_all_webpage_bamboogle_ckpt176 \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 200 \
+ --model_path "/share/project/sunshuang/deep_research/search_o1/model/checkpoint-176" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search"
+
+
+
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ # --dataset_name 2wiki \
+ # --cache_dir_base cache \
+ # --output_dir_base output/outputs_sum_all_webpage_2wiki_ckpt176 \
+ # --split test \
+ # --max_search_limit 10 \
+ # --max_turn 10 \
+ # --top_k 10 \
+ # --max_doc_len 5000 \
+ # --subset_num 100 \
+ # --model_path "/share/project/sunshuang/deep_research/search_o1/model/checkpoint-176" \
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ # --bing_endpoint "https://google.serper.dev/search"
+
+
+
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_single_page.py \
+ # --dataset_name simpleqa \
+ # --cache_dir_base cache \
+ # --output_dir_base output/outputs_sum_single_page_simpleqa \
+ # --split test \
+ # --max_search_limit 10 \
+ # --max_turn 10 \
+ # --top_k 10 \
+ # --max_doc_len 5000 \
+ # --subset_num 100 \
+ # --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ # --bing_endpoint "https://google.serper.dev/search"
+
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ # --dataset_name simpleqa \
+ # --cache_dir_base cache \
+ # --output_dir_base output/outputs_sum_all_webpage_simpleqa_ex_urls_qwq \
+ # --split test \
+ # --max_search_limit 10 \
+ # --max_turn 10 \
+ # --top_k 10 \
+ # --max_doc_len 5000 \
+ # --subset_num 100 \
+ # --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ # --bing_endpoint "https://google.serper.dev/search"
+
+
+
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_for_rag_rl.py \
+ # --dataset_name 2wiki \
+ # --cache_dir_base cache_rag_rl \
+ # --output_dir_base output/outputs_sum_all_webpage_simpleqa_rag_rl \
+ # --split test \
+ # --max_search_limit 10 \
+ # --max_turn 10 \
+ # --top_k 10 \
+ # --max_doc_len 5000 \
+ # --subset_num 100 \
+ # --model_path "/share/project/zhipengchen/model/Qwen-2.5-7B-base-RAG-RL" \
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ # --bing_endpoint "https://google.serper.dev/search"
+
+
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name simpleqa \
+ --cache_dir_base cache \
+ --output_dir_base output/outputs_sum_all_webpage_simpleqa500_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 500 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search"
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name simpleqa \
+ --cache_dir_base cache_simpleqa_exurls_qwq \
+ --output_dir_base output/outputs_sum_all_webpage_simpleqa500_ex_urls_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 500 \
+ --is_exclude_urls \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search"
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name hotpotqa \
+ --cache_dir_base cache \
+ --output_dir_base output/outputs_sum_all_webpage_hotpotqa500_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 500 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search"
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name simpleqa \
+ --cache_dir_base cache \
+ --output_dir_base output/outputs_sum_all_webpage_simpleqa500_ckpt176 \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 500 \
+ --model_path "/share/project/sunshuang/deep_research/search_o1/model/checkpoint-176" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search"
+
+
+
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name simpleqa \
+ --cache_dir_base cache \
+ --output_dir_base output/outputs_sum_all_webpage_simpleqa500_r1_32b \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 500 \
+ --model_path "/share/project/zhipengchen/model/DeepSeek-R1-Distill-Qwen-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/simpleqa500_r1_32b.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_test_new_prompt.py \
+ --dataset_name simpleqa \
+ --cache_dir_base cache \
+ --output_dir_base output/output_test_prompt/outputs_sum_all_webpage_simpleqa100_new_prompt_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 100 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/simpleqa100_new_prompt_qwq.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name simpleqa \
+ --cache_dir_base cache \
+ --output_dir_base output/output_test_prompt/outputs_sum_all_webpage_simpleqa100_old_prompt_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 100 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/simpleqa100_old_prompt_qwq.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_test_new_prompt.py \
+ --dataset_name hotpotqa \
+ --cache_dir_base cache \
+ --output_dir_base output/output_test_prompt/outputs_sum_all_webpage_hotpotqa100_new_prompt_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 100 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/hotpotqa100_new_prompt_qwq.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name hotpotqa \
+ --cache_dir_base cache \
+ --output_dir_base output/output_test_prompt/outputs_sum_all_webpage_hotpotqa100_old_prompt_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 100 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/hotpotqa100_old_prompt_qwq.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_single_page.py \
+ --dataset_name simpleqa \
+ --cache_dir_base cache_single_web_analysis \
+ --output_dir_base output/output_single_web_analysis/outputs_sum_single_page_gen_data_simpleqa \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/Simpleqa_sum_single_page_gen_data_simpleqa.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_single_page.py \
+ --dataset_name hotpotqa \
+ --cache_dir_base cache_single_web_analysis \
+ --output_dir_base output/output_single_web_analysis/outputs_sum_single_page_test_hotpotqa100 \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --subset_num 100 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/Simpleqa_sum_single_page_test_hotpotqa100.log 2>&1 &
+
+
+ # Test the eval dataset
+
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name eval \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_qwq.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_single_page.py \
+ --dataset_name eval \
+ --cache_dir_base cache_eval_sum_single_page_test_maxworker_64 \
+ --output_dir_base output/output_eval/outputs_sum_single_page_qwq_test_maxworker_64 \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_single_page_qwq_test_maxworker_64.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_test_new_prompt.py \
+ --dataset_name eval \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_test_new_prompt_qwq \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_test_new_prompt_qwq.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name eval \
+ --cache_dir_base cache_test \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_test \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --subset_num 5 \
+ --rollout_num 5 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/test_1.log 2>&1 &
+
+
+
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ # --dataset_name simpleqa \
+ # --cache_dir_base cache_simpleqa_exurls_ckpt176 \
+ # --output_dir_base output/outputs_sum_all_webpage_simpleqa500_ex_urls_ckpt176 \
+ # --split test \
+ # --max_search_limit 10 \
+ # --max_turn 10 \
+ # --top_k 10 \
+ # --max_doc_len 5000 \
+ # --subset_num 500 \
+ # --is_exclude_urls \
+ # --model_path "/share/project/sunshuang/deep_research/search_o1/model/checkpoint-176" \
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ # --bing_endpoint "https://google.serper.dev/search"
+
+
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ # --dataset_name hotpotqa \
+ # --cache_dir_base cache \
+ # --output_dir_base output/outputs_sum_all_webpage_hotpotqa500_ckpt176 \
+ # --split test \
+ # --max_search_limit 10 \
+ # --max_turn 10 \
+ # --top_k 10 \
+ # --max_doc_len 5000 \
+ # --subset_num 500 \
+ # --model_path "/share/project/sunshuang/deep_research/search_o1/model/checkpoint-176" \
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ # --bing_endpoint "https://google.serper.dev/search"
+
+
+
+
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name eval \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_eval/outputs_sum_all_webpage_ckpt176 \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/sunshuang/deep_search/search_o1/model/checkpoint-176" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_eval/sum_all_webpage_ckpt176.log 2>&1 &
deep_search/search_o1/run_gen_data_for_simple_qa_ex_urls.sh ADDED
@@ -0,0 +1,36 @@
+ export http_proxy=http://127.0.0.1:7880
+ export https_proxy=http://127.0.0.1:7880
+
+ export CUDA_VISIBLE_DEVICES=4,5,6,7
+
+
+ /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ --dataset_name simpleqa \
+ --cache_dir_base cache_simpleqa_real_exurls_ckpt176 \
+ --output_dir_base output/outputs_sum_all_webpage_simpleqa500_real_exurls_ckpt176 \
+ --split test \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --max_doc_len 5000 \
+ --subset_num 500 \
+ --is_exclude_urls \
+ --model_path "/share/project/sunshuang/deep_research/search_o1/model/checkpoint-176" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/simpleqa500_real_exurls_ckpt176.log 2>&1
+
+
+ # /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage.py \
+ # --dataset_name simpleqa \
+ # --cache_dir_base cache_simpleqa_real_exurls_qwq \
+ # --output_dir_base output/outputs_sum_all_webpage_simpleqa500_real_exurls_qwq \
+ # --split test \
+ # --max_search_limit 10 \
+ # --max_turn 10 \
+ # --top_k 10 \
+ # --max_doc_len 5000 \
+ # --subset_num 500 \
+ # --is_exclude_urls \
+ # --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ # --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ # --bing_endpoint "https://google.serper.dev/search" > logs/simpleqa500_real_exurls_qwq.log 2>&1
deep_search/search_o1/run_gen_data_mix.sh ADDED
@@ -0,0 +1,246 @@
+ # worker 1
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_1 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_1 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_1.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_2 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_2 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_2.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_3 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_3 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_3.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_4 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_4 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_4.log 2>&1 &
+
+ # worker 2
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_5 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_5 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_5.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_6 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_6 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_6.log 2>&1 &
+
+
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_7 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_7 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_7.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_8 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_8 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_8.log 2>&1 &
+
+ # worker 3
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_9 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_9 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_9.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_10 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_10 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_10.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_11 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_11 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_11.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_12 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_12 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_12.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_13 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_13 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_13.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_14 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_14 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 9 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_14.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_15 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_15 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 9 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_15.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name split_16 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_16 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/3200_split_16.log 2>&1 &
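The sixteen launches above differ only in the split id, the GPU pair, and the log path, so each worker's four jobs could equally be emitted by a loop; a minimal sketch under that assumption (GPUS, SEARCH_KEY, and the per-worker split range 1-4 are illustrative placeholders, not taken from the repo):

    # Launch four rollout jobs, one per GPU pair; each background job inherits
    # the CUDA_VISIBLE_DEVICES value in effect at the moment it is started.
    GPUS=("0,1" "2,3" "4,5" "6,7")
    for i in 1 2 3 4; do
      export CUDA_VISIBLE_DEVICES=${GPUS[$((i-1))]}
      nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
        --dataset_name split_$i \
        --cache_dir_base cache_eval_sum_all_webpage \
        --output_dir_base output/output_sum_all_webpage_gen_data/outputs_3200_split_$i \
        --split gen \
        --max_search_limit 10 --max_turn 10 --top_k 10 \
        --rollout_num 10 --max_doc_len 5000 \
        --model_path "/share/project/zhipengchen/model/QwQ-32B" \
        --bing_subscription_key "$SEARCH_KEY" \
        --bing_endpoint "https://google.serper.dev/search" \
        > logs/log_sum_all_webpage_gen_data/3200_split_$i.log 2>&1 &
    done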
deep_search/search_o1/run_gen_data_mix_last_8000.sh ADDED
@@ -0,0 +1,260 @@
+ # worker 1
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_1 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_1 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_1.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_2 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_2 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 1 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_2.log 2>&1 &
+
+
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_3 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_3 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_3.log 2>&1 &
+
+
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_4 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_4 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_4.log 2>&1 &
+
+
+ # worker 2
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_5 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_5 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_5.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_6 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_6 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 1 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_6.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_7 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_7 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_7.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_8 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_8 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_8.log 2>&1 &
+
+
+ # worker 3
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_9 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_9 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_9.log 2>&1 &
+
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_10 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_10 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_10.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_11 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_11 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_11.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name mixed_data_left_last_8000_split_12 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_mixed_data_left_last_8000_split_12 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 3 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/mixed_data_left_last_8000_split_12.log 2>&1 &
+
+
+
+ # gen nq random select 500
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name nq_500_split_0 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_nq_500_split_0 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 5 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/nq_500_split_0.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name nq_500_split_1 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_nq_500_split_1 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 5 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/nq_500_split_1.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name nq_500_split_2 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_nq_500_split_2 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 5 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/nq_500_split_2.log 2>&1 &
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name nq_500_split_3 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_nq_500_split_3 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 5 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/nq_500_split_3.log 2>&1 &
deep_search/search_o1/run_gen_data_selected_remove_2k.sh ADDED
@@ -0,0 +1,201 @@
+ # worker 1
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_1 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_1 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_1.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_2 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_2 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_2.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_3 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_3 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_3.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_4 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_4 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_4.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_5 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_5 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_5.log 2>&1 &
+
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_6 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_6 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_6.log 2>&1 &
+
+
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_7 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_7 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_7.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_8 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_8 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_8.log 2>&1 &
+
+
+
+ export CUDA_VISIBLE_DEVICES=0,1
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_9 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_9 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_9.log 2>&1 &
+
+
+
+
+
+
+ export CUDA_VISIBLE_DEVICES=2,3
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_10 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_10 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_10.log 2>&1 &
+
+
+
+ export CUDA_VISIBLE_DEVICES=4,5
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_11 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_11 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_11.log 2>&1 &
+
+
+ export CUDA_VISIBLE_DEVICES=6,7
+ nohup /share/project/miniconda/envs/search_o1/bin/python -u scripts/search_o1_sum_all_webpage_rollout.py \
+ --dataset_name merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_12 \
+ --cache_dir_base cache_eval_sum_all_webpage \
+ --output_dir_base output/output_sum_all_webpage_gen_data/outputs_merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_12 \
+ --split gen \
+ --max_search_limit 10 \
+ --max_turn 10 \
+ --top_k 10 \
+ --rollout_num 10 \
+ --max_doc_len 5000 \
+ --model_path "/share/project/zhipengchen/model/QwQ-32B" \
+ --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" \
+ --bing_endpoint "https://google.serper.dev/search" > logs/log_sum_all_webpage_gen_data/merged_tagged_domain_keypoints_keywords_count_hop_remove_2k_data_split_12.log 2>&1 &
+
+
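The search.json added below is a captured Serper-style response for one sample query; its top-level fields (searchParameters, answerBox, organic, peopleAlsoAsk) are what a consumer of these search results would read. A minimal extraction sketch, using only field names visible in the file itself and assuming jq is available:

    # Pull the answer-box snippet and the ranked organic hits out of the capture.
    jq -r '.answerBox.snippet' search.json
    jq -r '.organic[] | "\(.position)\t\(.title)\t\(.link)"' search.json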
deep_search/search_o1/search.json ADDED
@@ -0,0 +1,172 @@
+ {
+ "searchParameters": {
+ "q": "when does season 14 of grey's anatomy come out",
+ "type": "search",
+ "num": 11,
+ "mkt": "en-US",
+ "setLang": "en",
+ "engine": "google"
+ },
+ "answerBox": {
+ "snippet": "The fourteenth season of the American television medical drama Grey's Anatomy was ordered on February 10, 2017, by American Broadcasting Company (ABC), and premiered on September 28, 2017 with a special 2-hour premiere.",
+ "snippetHighlighted": [
+ "September 28, 2017"
+ ],
+ "title": "Grey's Anatomy season 14 - Wikipedia",
+ "link": "https://en.wikipedia.org/wiki/Grey%27s_Anatomy_season_14"
+ },
+ "organic": [
+ {
+ "title": "Season 14 (Grey's Anatomy)",
+ "link": "https://greysanatomy.fandom.com/wiki/Season_14_(Grey%27s_Anatomy)",
+ "snippet": "The fourteenth season of Grey's Anatomy premiered September 28, 2017, with a two-hour premiere. It was broadcast on Thursday nights at 8 PM on ABC.",
+ "position": 1
+ },
+ {
+ "title": "Grey's Anatomy Season 14 Premiere Date Announced - ABC",
+ "link": "https://abc.com/news/9fce69b2-3b93-40f3-9cd3-d75b1d90bb05/category/738075",
+ "snippet": "Grey's Anatomy fans, get ready for the Season 14 two-hour premiere on Thursday, September 28 at 8|7c!",
+ "date": "Jul 31, 2017",
+ "position": 2
+ },
+ {
+ "title": "Grey's Anatomy Season 14 [DVD] [2018] - Amazon.com",
+ "link": "https://www.amazon.com/Greys-Anatomy-Season-14-DVD/dp/B07H2VPXY1",
+ "snippet": "Grey's Anatomy Season 14 [DVD] [2018] ; Language, ‎English (Dolby Digital 5.1) ; Aspect Ratio, ‎Unknown ; Release date, ‎October 22, 2018 ; ASIN, ‎B07H2VPXY1.",
+ "rating": 3.9,
+ "ratingCount": 1134,
+ "position": 3
+ },
+ {
+ "title": "Season 14 – Grey's Anatomy - Rotten Tomatoes",
+ "link": "https://www.rottentomatoes.com/tv/greys_anatomy/s14",
+ "snippet": "Watch Grey's Anatomy — Season 14 with a subscription on Disney+, Hulu, Netflix, or buy it on Fandango at Home. Grey's Anatomy — Season 14. What to Know.",
+ "position": 4
+ },
+ {
+ "title": "Grey's Anatomy (TV Series 2005– ) - Episode list - IMDb",
+ "link": "https://www.imdb.com/title/tt0413573/episodes/?season=14",
+ "snippet": "S14.E1 ∙ Break Down the House. Thu, Sep 28, 2017.",
+ "sitelinks": [
+ {
+ "title": "Break Down the House",
+ "link": "https://www.imdb.com/title/tt6523880/"
+ },
+ {
+ "title": "Fight For Your Mind",
+ "link": "https://www.imdb.com/title/tt7044330/"
+ },
+ {
+ "title": "1-800-799-7233",
+ "link": "https://www.imdb.com/title/tt7043736/"
+ },
+ {
+ "title": "9.1",
+ "link": "https://www.imdb.com/title/tt7043724/"
+ }
+ ],
+ "position": 5
+ },
+ {
+ "title": "Grey's Anatomy - Wikipedia",
+ "link": "https://en.wikipedia.org/wiki/Grey%27s_Anatomy",
+ "snippet": "The series premiered on March 27, 2005, on ABC as a mid-season replacement. The show's title is a reference to Gray's Anatomy, a classic human anatomy textbook.",
+ "sitelinks": [
+ {
+ "title": "Season 14",
+ "link": "https://en.wikipedia.org/wiki/Grey%27s_Anatomy_season_14"
+ },
+ {
+ "title": "List of Grey's Anatomy episodes",
+ "link": "https://en.wikipedia.org/wiki/List_of_Grey%27s_Anatomy_episodes"
+ },
+ {
+ "title": "Season 1",
+ "link": "https://en.wikipedia.org/wiki/Grey%27s_Anatomy_season_1"
+ },
+ {
+ "title": "Season 15",
+ "link": "https://en.wikipedia.org/wiki/Grey%27s_Anatomy_season_15"
+ }
+ ],
+ "position": 6
+ },
+ {
+ "title": "1-800-799-7233 | Grey's Anatomy Universe Wiki | Fandom",
+ "link": "https://greysanatomy.fandom.com/wiki/1-800-799-7233",
+ "snippet": "1-800-799-7233 is the ninth episode and mid-season premiere of the fourteenth season and the 302nd overall episode of Grey's Anatomy.",
+ "date": "Jan 18, 2018",
+ "position": 7
+ },
+ {
+ "title": "Grey's Anatomy Season 14 \"Most Addictive Drama\" Promo (HD)",
+ "link": "https://www.youtube.com/watch?v=4fdiQqDp6BY",
+ "snippet": "Grey's Anatomy Season 14 premieres thursday September 28th on ABC! Meredith and the team are focused on helping Owen's sister after her ...",
+ "date": "Sep 21, 2017",
+ "position": 8
+ },
+ {
+ "title": "Grey's Anatomy Season 14 Air Dates & Countdown - EpisoDate.com",
+ "link": "https://www.episodate.com/tv-show/grey-s-anatomy?season=14",
+ "snippet": "Grey's Anatomy Season 14 Air Dates ; Thursday Sep 28, 2017 · S14E01 - Break Down the House ; Thursday Sep 28, 2017 · S14E02 - Get Off on the Pain ; Thursday Oct 05, ...",
+ "rating": 9.5,
+ "ratingMax": 10,
+ "ratingCount": 109,
+ "position": 9
+ },
+ {
+ "title": "'Grey's Anatomy' Is Obviously Returning For Season 14 - Bustle",
+ "link": "https://www.bustle.com/p/when-does-greys-anatomy-season-14-premiere-the-show-will-go-on-as-long-as-meredith-does-57795",
+ "snippet": "UPDATE: ABC announced that Grey's Anatomy Season 14 will have a two-hour season premiere on Sept. 28. EARLIER: Grey's Anatomy is set to air on ...",
+ "date": "May 18, 2017",
+ "position": 10
+ }
+ ],
+ "peopleAlsoAsk": [
+ {
+ "question": "When did season 14 of Grey's Anatomy come out?",
+ "snippet": "The fourteenth season of Grey's Anatomy premiered September 28, 2017, with a two-hour premiere.",
+ "title": "Season 14 (Grey's Anatomy)",
+ "link": "https://greysanatomy.fandom.com/wiki/Season_14_(Grey%27s_Anatomy)"
+ },
+ {
+ "question": "When did Grey's Anatomy season 15 come out?",
+ "snippet": "The fifteenth season of the American television medical drama Grey's Anatomy was ordered on April 20, 2018, by American Broadcasting Company (ABC). The season premiered on September 27, 2018, with a special 2-hour premiere. The episode count for the season consists in 25 episodes.",
135
+ "title": "Grey's Anatomy season 15 - Wikipedia",
136
+ "link": "https://en.wikipedia.org/wiki/Grey%27s_Anatomy_season_15"
137
+ },
138
+ {
139
+ "question": "Is Grey's Anatomy season 19 out?",
140
+ "snippet": "It premiered on October 6, 2022, for the 2022–23 broadcast television season. Krista Vernoff returned for the season as executive producer and showrunner alongside her production company Trip the Light Productions.",
141
+ "title": "Grey's Anatomy season 19 - Wikipedia",
142
+ "link": "https://en.wikipedia.org/wiki/Grey%27s_Anatomy_season_19"
143
+ }
144
+ ],
145
+ "relatedSearches": [
146
+ {
147
+ "query": "Grey's Anatomy season 14 cast Guest Stars"
148
+ },
149
+ {
150
+ "query": "Grey's Anatomy season 14 cast interns"
151
+ },
152
+ {
153
+ "query": "Grey's Anatomy Season 15 summary"
154
+ },
155
+ {
156
+ "query": "How many Episodes in Grey's Anatomy season 15"
157
+ },
158
+ {
159
+ "query": "When did season 15 of Grey's Anatomy come out"
160
+ },
161
+ {
162
+ "query": "Grey's Anatomy season 14 Episode 11 full cast"
163
+ },
164
+ {
165
+ "query": "Grey's Anatomy season 14 Episode 16 cast astronaut"
166
+ },
167
+ {
168
+ "query": "Who dies in season 15 of Grey's Anatomy"
169
+ }
170
+ ],
171
+ "credits": 2
172
+ }
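The payload above shows the fields a retrieval pipeline can draw on: `searchParameters` echoes the query, `answerBox` carries a direct answer when Google surfaces one, `organic` holds ranked results with optional `date`, `rating`, and `sitelinks`, and `peopleAlsoAsk`/`relatedSearches` suggest follow-up queries. A hedged sketch of reducing such a response to (snippet, link) evidence pairs; the function name and the answer-box-first fallback order are our assumptions, not code from this repo.

import json

def extract_evidence(response: dict, top_k: int = 3) -> list[tuple[str, str]]:
    """Reduce a Serper response to (snippet, link) pairs, answer box first."""
    evidence = []
    box = response.get("answerBox", {})
    if box.get("snippet"):
        evidence.append((box["snippet"], box.get("link", "")))
    for hit in response.get("organic", [])[:top_k]:
        if hit.get("snippet"):
            evidence.append((hit["snippet"], hit.get("link", "")))
    return evidence

if __name__ == "__main__":
    with open("deep_search/search_o1/search.json", encoding="utf-8") as f:
        response = json.load(f)
    for snippet, link in extract_evidence(response):
        print(f"- {snippet[:80]}... ({link})")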