SunSec commited on
Commit
41c6b28
·
verified ·
1 Parent(s): 13e56c8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +10 -0
  2. deep_search/data_from_zhiyuan/data_for_paper/4-28_4k_question_1/final_selected_dataset.json +3 -0
  3. deep_search/data_from_zhiyuan/data_for_paper/orignal_871/no_error_data_871.json +3 -0
  4. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/split_8_tagged.json +3 -0
  5. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/all_stats/domain_distribution_pie.png +3 -0
  6. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/all_stats/hop_histogram.png +3 -0
  7. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/domain_distribution_pie.png +3 -0
  8. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset.json +3 -0
  9. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset_1.json +3 -0
  10. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/hop_histogram.png +3 -0
  11. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_3_tagged.json +3 -0
  12. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_4_tagged.json +3 -0
  13. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_5_tagged.json +3 -0
  14. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_6_tagged.json +3 -0
  15. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_7_tagged.json +3 -0
  16. deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/total_histogram.png +3 -0
  17. deep_search/search_o1/scripts/__pycache__/bing_search.cpython-39.pyc +0 -0
  18. deep_search/search_o1/scripts/__pycache__/evaluate.cpython-310.pyc +0 -0
  19. deep_search/search_o1/scripts/__pycache__/evaluate.cpython-311.pyc +0 -0
  20. deep_search/search_o1/scripts/__pycache__/evaluate.cpython-39.pyc +0 -0
  21. deep_search/search_o1/scripts/__pycache__/prompts.cpython-310.pyc +0 -0
  22. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/README.md +174 -0
  23. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/cache/search_cache.json +752 -0
  24. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/cache/url_cache.json +0 -0
  25. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/SimpleDeepSearcher_demo.py +899 -0
  26. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/SimpleDeepSearcher_demo_1.py +1043 -0
  27. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/add_eval.cpython-310.pyc +0 -0
  28. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/bing_search.cpython-310.pyc +0 -0
  29. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/evaluate.cpython-310.pyc +0 -0
  30. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/google_search.cpython-310.pyc +0 -0
  31. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/google_search.cpython-311.pyc +0 -0
  32. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/prompts.cpython-310.pyc +0 -0
  33. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/prompts.cpython-311.pyc +0 -0
  34. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/stage_wise_analysis.cpython-310.pyc +0 -0
  35. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/stage_wise_analysis.cpython-311.pyc +0 -0
  36. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/chatweb.log +0 -0
  37. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/demo_client_basic.json +51 -0
  38. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/evaluate.py +452 -0
  39. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/google_search.py +416 -0
  40. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/inference.py +774 -0
  41. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_main.log +0 -0
  42. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_main.sh +3 -0
  43. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_reasoning.log +113 -0
  44. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_reasoning.sh +8 -0
  45. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_summ.log +0 -0
  46. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_summ.sh +11 -0
  47. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/lcb_runner/benchmarks/__init__.py +13 -0
  48. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/lcb_runner/benchmarks/code_execution.py +67 -0
  49. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/lcb_runner/benchmarks/code_generation.py +139 -0
  50. deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/lcb_runner/benchmarks/test_output_prediction.py +70 -0
.gitattributes CHANGED
@@ -75,3 +75,13 @@ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/split_5_tagged.jso
75
  deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/split_1_tagged.json filter=lfs diff=lfs merge=lfs -text
76
  deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/split_2_tagged.json filter=lfs diff=lfs merge=lfs -text
77
  deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/musique_tagged_domain_keypoints_keywords_count.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
75
  deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/split_1_tagged.json filter=lfs diff=lfs merge=lfs -text
76
  deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/split_2_tagged.json filter=lfs diff=lfs merge=lfs -text
77
  deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/musique_tagged_domain_keypoints_keywords_count.json filter=lfs diff=lfs merge=lfs -text
78
+ deep_search/data_from_zhiyuan/data_for_paper/orignal_871/no_error_data_871.json filter=lfs diff=lfs merge=lfs -text
79
+ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/split_8_tagged.json filter=lfs diff=lfs merge=lfs -text
80
+ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_7_tagged.json filter=lfs diff=lfs merge=lfs -text
81
+ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset.json filter=lfs diff=lfs merge=lfs -text
82
+ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_6_tagged.json filter=lfs diff=lfs merge=lfs -text
83
+ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_4_tagged.json filter=lfs diff=lfs merge=lfs -text
84
+ deep_search/data_from_zhiyuan/data_for_paper/4-28_4k_question_1/final_selected_dataset.json filter=lfs diff=lfs merge=lfs -text
85
+ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_3_tagged.json filter=lfs diff=lfs merge=lfs -text
86
+ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_5_tagged.json filter=lfs diff=lfs merge=lfs -text
87
+ deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset_1.json filter=lfs diff=lfs merge=lfs -text
deep_search/data_from_zhiyuan/data_for_paper/4-28_4k_question_1/final_selected_dataset.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41d27c626a2eb5a4cd26df4c7a18477850a8493e540377617256230b2258ca3c
3
+ size 77422541
deep_search/data_from_zhiyuan/data_for_paper/orignal_871/no_error_data_871.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:638dac3e6cb9012d1d4498795195da16f89cefa0bbb42f554c606b2d872d659d
3
+ size 79107369
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/split_8_tagged.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:025fee5735b799bfac8e988aaf8e0f6f3b7343bc58aa58e12f116e09e4ab9be1
3
+ size 49212976
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/all_stats/domain_distribution_pie.png ADDED

Git LFS Details

  • SHA256: 877a4099d2cd4753bc9d50d802d20437a9dc1d301a525bb293a705b29a7b4aec
  • Pointer size: 131 Bytes
  • Size of remote file: 679 kB
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/all_stats/hop_histogram.png ADDED

Git LFS Details

  • SHA256: 898c97fa4c12d18b556af2965ed2eeea98b54547164db07f21c23eb207baa756
  • Pointer size: 130 Bytes
  • Size of remote file: 19.7 kB
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/domain_distribution_pie.png ADDED

Git LFS Details

  • SHA256: 9d8fa861bd6fed783465e24cada51d4b1f3a861b4af572d1166e1967b1d2bc2a
  • Pointer size: 131 Bytes
  • Size of remote file: 119 kB
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:975acf28da65eea6d3860afde69293afe7de9696160d9553cfd17984b0350f7e
3
+ size 16227596
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/final_selected_dataset_1.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72f8119923720b9435b918f13228ebc427a2a77cc6ee7e902b4fa0f84de15d07
3
+ size 16227596
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/hop_histogram.png ADDED

Git LFS Details

  • SHA256: 898c97fa4c12d18b556af2965ed2eeea98b54547164db07f21c23eb207baa756
  • Pointer size: 130 Bytes
  • Size of remote file: 19.7 kB
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_3_tagged.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8b8b62bca48beb7a8f480ba0e6d59ab776b49cb43caeb61fce05e954bbf4e4a
3
+ size 14884742
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_4_tagged.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6d045d5527a637d00c1e130268811919faa5cb7e40ee0315ab32cb447b3d76d
3
+ size 16859190
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_5_tagged.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8c85b1a5a5bd73d40b173f2d5f10327318cc5b674ee78e9cacd4c93a641f0f7
3
+ size 17154595
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_6_tagged.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53b6f27b63c91aec868d2675212200d30cca1e92922140a9bacc4c96ac1e99c2
3
+ size 17098702
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/split_7_tagged.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2bd2a100a7c5e890e98eb1c64aae35c0d1e172a3a717de8b10cd410ef5d8d78b
3
+ size 17795485
deep_search/data_from_zhiyuan/data_syn/data/mixed_data/splits/tagged_domain_keypoints/total_histogram.png ADDED

Git LFS Details

  • SHA256: e4bd4137d12e30c3619c9788c5eb29f011fa53e5b02609179bc1041d71b272f3
  • Pointer size: 130 Bytes
  • Size of remote file: 21.6 kB
deep_search/search_o1/scripts/__pycache__/bing_search.cpython-39.pyc ADDED
Binary file (10.3 kB). View file
 
deep_search/search_o1/scripts/__pycache__/evaluate.cpython-310.pyc ADDED
Binary file (15.4 kB). View file
 
deep_search/search_o1/scripts/__pycache__/evaluate.cpython-311.pyc ADDED
Binary file (26.3 kB). View file
 
deep_search/search_o1/scripts/__pycache__/evaluate.cpython-39.pyc ADDED
Binary file (15.5 kB). View file
 
deep_search/search_o1/scripts/__pycache__/prompts.cpython-310.pyc ADDED
Binary file (25.9 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/README.md ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # SimpleDeepSearcher: Deep Information Seeking via Web-Powered Reasoning Trajectory Synthesis
3
+
4
+
5
+ ## 🚀 Data Synthesis in Real Web Environment
6
+
7
+ ### 1. Launch the Summary Model
8
+
9
+ ```bash
10
+ export CUDA_VISIBLE_DEVICES=0,1
11
+ vllm serve "YOUR_SUMMARY_MODEL_PATH" \
12
+ --tensor-parallel-size=2 \
13
+ --gpu-memory-utilization 0.95 \
14
+ --port 8000 > output/vllm_serve.log 2>&1 &
15
+ ```
16
+
17
+ ---
18
+
19
+ ### 2. Generate Inference Search Trajectories
20
+
21
+ ```bash
22
+ export CUDA_VISIBLE_DEVICES=0,1
23
+ python -u inference/sythesis_data.py \
24
+ --dataset_name YOUR_DATASET_NAME \
25
+ --rollout_num YOUR_ROLLOUT_NUM \
26
+ --cache_dir_base cache \
27
+ --output_dir_base output \
28
+ --model_path "/path/to/your/reasoning_model" \
29
+ --summarization_model_path "/path/to/your/summarization_model" \
30
+ --summarization_model_url YOUR_SUMMARIZATION_MODEL_URL \
31
+ --google_subscription_key YOUR_KEY \
32
+ --google_endpoint "https://google.serper.dev/search" > output/output.log 2>&1
33
+ ```
34
+
35
+ ---
36
+
37
+ ## 🎯 Data Construction
38
+
39
+ ### 🔍 Query Sampling
40
+
41
+ #### 1. Annotate Domains and Keywords of Labeled Data
42
+
43
+ ```bash
44
+ python process_data/query_sampling/data_tag_domain_keypoints.py \
45
+ --input_file_path "/path/to/your/input.json" \
46
+ --cuda_visible_devices "0,1" \
47
+ --model_path "/path/to/your/tag_model"
48
+ ```
49
+
50
+ #### 2. Extract Domains and Keywords
51
+
52
+
53
+ ```bash
54
+ python process_data/query_sampling/extract_domain_keypoints.py \
55
+ --input_file_path "/path/to/your/input_tagged.json" \
56
+ --output_file_path "/path/to/your/output_extracted.json"
57
+ ```
58
+
59
+ #### 3. Count Number of Units
60
+
61
+ ```bash
62
+ python process_data/query_sampling/units_count.py \
63
+ --input_file "/path/to/your/output_extracted.json"
64
+ ```
65
+
66
+ #### 4. Sample Questions
67
+
68
+ ```bash
69
+ python process_data/query_sampling/query_sampling.py \
70
+ --input_file "/path/to/your/output_extracted.json" \
71
+ --total_samples YOUR_TOTAL_SAMPLES_NUMBER
72
+ ```
73
+
74
+ ---
75
+
76
+ ### 💬 Response Curation
77
+
78
+ #### 1. Filter Responses
79
+
80
+ ```bash
81
+ python process_data/repsonse_curation/response_curation.py \
82
+ --root_path "/path/to/your/synthesis_data" \
83
+ --output_path "/path/to/your/curated_data"
84
+
85
+ #### 2. Format Data
86
+
87
+ ```bash
88
+ python process_data/repsonse_curation/format_data.py \
89
+ --input_file "/path/to/your/input_file.json"
90
+ ```
91
+
92
+ #### 3. Format Check
93
+
94
+ ```bash
95
+ python process_data/repsonse_curation/format_filter.py \
96
+ --input_file "/path/to/your/formatted_data.json"
97
+ ```
98
+
99
+ ---
100
+
101
+ ## 🧠 SFT Training
102
+
103
+ > Run the following script after replacing the corresponding variables:
104
+
105
+ ```bash
106
+ export OMP_NUM_THREADS=20
107
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
108
+
109
+ # Define parameters
110
+ lr=1e-5
111
+ base="BACKBONE" # path to base model
112
+ tokenizer="TOKENIZER" # path to tokenizer
113
+ train_data="sft/data/training_data_871.json" # path to train data
114
+ bsz=2 # batch size
115
+ acc=4 # gradient accumulation steps
116
+
117
+ # Create output directory
118
+ JOB_ID=$(( RANDOM % 100000 )) # random JOB-ID
119
+ JOB_NAME=YOUR_JOB_NAME
120
+ output_dir="sft/output/JOB:${JOB_ID}#${JOB_NAME}"
121
+ mkdir -p "$output_dir"
122
+
123
+ echo "output_dir: ${output_dir}"
124
+
125
+ # Execute deepspeed command
126
+ deepspeed \
127
+ --master_port=9944 \
128
+ sft/sft.py \
129
+ --deepspeed sft/ds_zero3_offload.json \
130
+ --model_name_or_path $base \
131
+ --tokenizer_name_or_path $tokenizer \
132
+ --do_train \
133
+ --save_safetensors true \
134
+ --data_path $train_data \
135
+ --lr_scheduler_type cosine \
136
+ --output_dir $output_dir \
137
+ --overwrite_output_dir \
138
+ --warmup_ratio 0.03 \
139
+ --gradient_checkpointing true \
140
+ --per_device_train_batch_size $bsz \
141
+ --gradient_accumulation_steps $acc \
142
+ --logging_steps 1 \
143
+ --learning_rate "$lr" \
144
+ --num_train_epochs 6 \
145
+ --save_strategy epoch \
146
+ --save_only_model true \
147
+ --model_max_length 30000 \
148
+ --save_total_limit 5 \
149
+ --bf16 || exit 1
150
+ ```
151
+
152
+
153
+
154
+ ### Eval
155
+
156
+ ```bash
157
+ export CUDA_VISIBLE_DEVICES=0,1
158
+ python -u inference/inference.py \
159
+ --dataset_name YOUR_DATASET_NAME \
160
+ --cache_dir_base cache \
161
+ --output_dir_base output \
162
+ --model_path "/path/to/your/reasoning_model" \
163
+ --summarization_model_path "/path/to/your/summarization_model" \
164
+ --summarization_model_url YOUR_SUMMARIZATION_MODEL_URL \
165
+ --google_subscription_key YOUR_KEY \
166
+ --google_endpoint "https://google.serper.dev/search" > output/output.log 2>&1
167
+ ```
168
+
169
+
170
+ ### LLM as a Judge
171
+
172
+ ```bash
173
+ python eval/gpt_eval.py
174
+ ```
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/cache/search_cache.json ADDED
@@ -0,0 +1,752 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "东北大学沈阳 校长": {
3
+ "searchParameters": {
4
+ "q": "东北大学沈阳 校长",
5
+ "type": "search",
6
+ "num": 11,
7
+ "mkt": "en-US",
8
+ "setLang": "en",
9
+ "engine": "google"
10
+ },
11
+ "answerBox": {
12
+ "snippet": "冯夏庭,工学博士,教授,博士生导师,中国工程院院士。 现任东北大学党委副书记、校长。",
13
+ "snippetHighlighted": [
14
+ "冯夏庭"
15
+ ],
16
+ "title": "冯夏庭 - 东北大学",
17
+ "link": "https://www.neu.edu.cn/info/1012/3311.htm"
18
+ },
19
+ "organic": [
20
+ {
21
+ "title": "现任领导 - 东北大学",
22
+ "link": "https://www.neu.edu.cn/xygk/xrld.htm",
23
+ "snippet": "党委书记. 郭海 · 校长. 冯夏庭 · 学术委员会主任. 柴天佑 · 党委副书记. 孙正林 · 党委副书记、纪委书记. 吴豪伟 · 党委副书记. 张皓 · 副校长. 徐峰 · 副校长. 唐立新.",
24
+ "position": 1
25
+ },
26
+ {
27
+ "title": "东北大学(中国) - 维基百科,自由的百科全书",
28
+ "link": "https://zh.wikipedia.org/zh-hans/%E4%B8%9C%E5%8C%97%E5%A4%A7%E5%AD%A6_(%E4%B8%AD%E5%9B%BD)",
29
+ "snippet": "东北大学主校区位于辽宁省沈阳 ... 张学良:奉系军阀领袖张作霖之长子,曾任中华民国陆军一级上将、东北保安军总司令、东北大学校长、东北大学名誉校长、哈尔滨工业大学名誉 ...",
30
+ "position": 2
31
+ },
32
+ {
33
+ "title": "历任领导 - 东北大学",
34
+ "link": "https://www.neu.edu.cn/xygk/lrld.htm",
35
+ "snippet": "历任领导 ; 王永江. ​1923年4月至1927年10月. 兼任东北大学校长 ; 刘尚清. 1927年11月至1928年8月. 兼任东北大学校长 ; 张学良. 1928年8月至1937年1月. 兼任东北大学校长.",
36
+ "position": 3
37
+ },
38
+ {
39
+ "title": "东北大学_百度百科",
40
+ "link": "https://baike.baidu.com/item/%E4%B8%9C%E5%8C%97%E5%A4%A7%E5%AD%A6/18014",
41
+ "snippet": "1928年8月,主政东北的张学良将军兼任东北大学校长。在他的领导下,学校明确了东北大学的办学宗旨、目的:“研究高深学术,培养专门人才,应社会之需要,谋文化之发展”。 ... 同年9 ...",
42
+ "position": 4
43
+ },
44
+ {
45
+ "title": "现任领导 - 东北大学秦皇岛分校",
46
+ "link": "https://www.neuq.edu.cn/xxgk1/xrld.htm",
47
+ "snippet": "赵勇,男,汉族,1973年5月生,辽宁沈阳人,1994年6月加入中国共产党,2001年5月参加工作,工学博士,教授,博士生导师。现任东北大学校长助理,秦皇岛分校党委副书记、校长。",
48
+ "position": 5
49
+ },
50
+ {
51
+ "title": "东北大学校友总会第七届校友代表大会召开 - 辽宁",
52
+ "link": "https://liaoning.eol.cn/lngd/202309/t20230918_2488289.shtml",
53
+ "snippet": "会议宣读并通过东北大学校友总会第七届理事会组成机构名单。校长冯夏庭担任会长,副校长孙雷、王辉担任副会长,理事会由海内外各地校友会代表和学校领导、 ...",
54
+ "date": "Sep 18, 2023",
55
+ "position": 6
56
+ },
57
+ {
58
+ "title": "东北大学、沈阳市高新区领导一行来我院调研交流",
59
+ "link": "https://nbidut.dlut.edu.cn/info/1018/28380.htm",
60
+ "snippet": "3月21日上午,东北大学校长助理刘延晖、沈阳市高新区科技创新局科技招商部部长敬亚豪等一行人来我院调研交流,我院院长郝海、副院长张博及相关部门负责人陪同调研。",
61
+ "position": 7
62
+ },
63
+ {
64
+ "title": "冯夏庭任东北大学校长 - 教育部",
65
+ "link": "http://www.moe.gov.cn/jyb_xwfb/gzdt_gzdt/s5987/202102/t20210202_512233.html",
66
+ "snippet": "冯夏庭,男,1964年9月生,1985年6月入党,1992年4月参加工作,东北工学院矿山建设工程专业博士研究生毕业,教授,中国工程院院士。2017年8月任东北大学党委常委 ...",
67
+ "date": "Feb 2, 2021",
68
+ "attributes": {
69
+ "Missing": "沈阳 | Show results with:沈阳"
70
+ },
71
+ "position": 8
72
+ },
73
+ {
74
+ "title": "党委副书记、校长:赵勇 - 东北大学秦皇岛分校",
75
+ "link": "https://www.neuq.edu.cn/info/1030/5850.htm",
76
+ "snippet": "赵勇,男,汉族,1973年5月生,辽宁沈阳人,1994年6月加入中国共产党,2001年5月参加工作,工学博士,教授,博士生导师。 现任东北大学校长助理,秦皇岛分校党委副书记、校长。",
77
+ "position": 9
78
+ },
79
+ {
80
+ "title": "东北大学校长赵继一行访问和平区政府 - 新闻中心- 中国教育在线",
81
+ "link": "https://news.eol.cn/dongtai/202004/t20200408_1720520.shtml",
82
+ "snippet": "近日,校长赵继、副校长唐立新一行访问沈阳市和平区政府,科学技术研究院相关负责人陪同访问。赵继一行与和平区区长马原就推进东北大学-三好街科技成果 ...",
83
+ "date": "Apr 8, 2020",
84
+ "position": 10
85
+ }
86
+ ],
87
+ "peopleAlsoAsk": [
88
+ {
89
+ "question": "东北大学相当于国内什么大学?",
90
+ "snippet": "东北大学在国内的排名相当于国内一流大学,如清华大学、北京大学、复旦大学、上海交通大学等。 东北大学的学术研究和教学水平也得到了国内外学者的认可,在国内外学术界享有盛誉。",
91
+ "title": "东北大学相当于国内哪个大学 - 指南者留学",
92
+ "link": "https://m.compassedu.hk/wb/153368"
93
+ },
94
+ {
95
+ "question": "大学校长属于什么级别?",
96
+ "snippet": "行政级别:正厅级(相当于厅长级)。",
97
+ "title": "举例说明各级别大学校长是什么行政级别? - 网易",
98
+ "link": "https://www.163.com/dy/article/JQNUPF2K05364542.html"
99
+ },
100
+ {
101
+ "question": "东北大学在沈阳吗?",
102
+ "snippet": "学校坐落在东北中心城市辽宁省沈阳市,在沈阳市建有南湖校区、浑南校区和沈河校区,在秦皇岛市设有东北大学秦皇岛分校,在佛山市设有佛山研究生创新学院。 占地总面积264.6万平方米,建筑面积194万平方米。",
103
+ "title": "学校简介 - 东北大学",
104
+ "link": "https://www.neu.edu.cn/xygk/xxjj.htm"
105
+ },
106
+ {
107
+ "question": "中国东北大学是985大学吗?",
108
+ "snippet": "东北大学始建于1923年4月,是一所具有爱国主义光荣传统的大学,是国家首批“211工程”和“985工程”重点建设的教育部直属高校,2017年入选国家首轮世界一流大学建设高校,2022年入选国家第二轮“双一流”建设高校。",
109
+ "title": "学校简介-东北大学秦皇岛分校",
110
+ "link": "https://www.neuq.edu.cn/xxgk1/xxjj.htm"
111
+ }
112
+ ],
113
+ "relatedSearches": [
114
+ {
115
+ "query": "东北大学在哪"
116
+ },
117
+ {
118
+ "query": "东北大学排名"
119
+ },
120
+ {
121
+ "query": "中国东北大学qs排名"
122
+ },
123
+ {
124
+ "query": "沈阳东北大学"
125
+ },
126
+ {
127
+ "query": "东北大学 副校长"
128
+ },
129
+ {
130
+ "query": "东北大学软件学院"
131
+ },
132
+ {
133
+ "query": "东北大学电话"
134
+ },
135
+ {
136
+ "query": "东北大学邮编"
137
+ }
138
+ ],
139
+ "credits": 2
140
+ },
141
+ "北京市市长是谁": {
142
+ "searchParameters": {
143
+ "q": "北京市市长是谁",
144
+ "type": "search",
145
+ "num": 11,
146
+ "mkt": "en-US",
147
+ "setLang": "en",
148
+ "engine": "google"
149
+ },
150
+ "answerBox": {
151
+ "title": "Mayor of Beijing / Officeholder",
152
+ "answer": "Yin Yong"
153
+ },
154
+ "organic": [
155
+ {
156
+ "title": "殷勇_市长_首都之窗 - 北京市人民政府",
157
+ "link": "https://www.beijing.gov.cn/gongkai/sld/szfld/sz/202210/t20221028_2847604.html",
158
+ "snippet": "殷勇,男,汉族,1969年8月生,研究生,工学博士,中共党员。现任二十届中央委员,十四届全国人大代表,北京市委副书记,市政府党组书记、市长。 工作分工. 领导市政府全面工作, ...",
159
+ "position": 1
160
+ },
161
+ {
162
+ "title": "殷勇(1969年) - 维基百科,自由的百科全书",
163
+ "link": "https://zh.wikipedia.org/zh-hans/%E6%AE%B7%E5%8B%87_(1969%E5%B9%B4)",
164
+ "snippet": "殷勇(1969年8月—),男,湖北武汉人,清华大学自动控制和企业管理双学士,哈佛大学肯尼迪政府学院公共管理硕士、清华大学系统工程博士。中华人民共和国政治人物,长期在国家 ...",
165
+ "position": 2
166
+ },
167
+ {
168
+ "title": "北京市市长列表- 维基百科,自由的百科全书",
169
+ "link": "https://zh.wikipedia.org/zh-hans/%E5%8C%97%E4%BA%AC%E5%B8%82%E5%B8%82%E9%95%BF%E5%88%97%E8%A1%A8",
170
+ "snippet": "彭真,1951年2月26日至1966年5月23日在任。1954年8月前为北京市人民政府市长,1954年8月后称北京市人民委员会市长。 · 吴德,1966年5月23日至1967年4月20日任北京市人民委员会 ...",
171
+ "position": 3
172
+ },
173
+ {
174
+ "title": "市领导_首都之窗_北京市人民政府门户网站",
175
+ "link": "https://www.beijing.gov.cn/gongkai/sld/",
176
+ "snippet": "市委领导. 书记:尹力 ; 市人大常委会领导. 主任:李秀领. 齐静 ; 市政府领导. 市长:殷勇. 夏林茂 ; 市政协领导. 主席:魏小东. 崔述强 ...",
177
+ "attributes": {
178
+ "Missing": "是 | Show results with:是"
179
+ },
180
+ "sitelinks": [
181
+ {
182
+ "title": "殷勇",
183
+ "link": "https://www.beijing.gov.cn/gongkai/sld/szfld/sz/202210/t20221028_2847604.html"
184
+ },
185
+ {
186
+ "title": "书记:尹力",
187
+ "link": "https://www.beijing.gov.cn/gongkai/sld/swld/swsj/202211/t20221113_2857628.html"
188
+ },
189
+ {
190
+ "title": "孙军民常委",
191
+ "link": "https://www.beijing.gov.cn/gongkai/sld/swld/swcw/202207/t20220701_2761151.html"
192
+ },
193
+ {
194
+ "title": "游钧副书记",
195
+ "link": "https://www.beijing.gov.cn/gongkai/sld/swld/swfsj/202502/t20250227_4020102.html"
196
+ }
197
+ ],
198
+ "position": 4
199
+ },
200
+ {
201
+ "title": "殷勇_百度百科",
202
+ "link": "https://baike.baidu.com/item/%E6%AE%B7%E5%8B%87/18433971",
203
+ "snippet": "殷勇,男,汉族,1969年8月生,湖北武汉人,1994年5月加入中国共产党,1997年1月参加工作,研究生毕业(清华大学系统工程专业),工学博士。现任第二十届中央委员,北京市委副书记 ...",
204
+ "position": 5
205
+ },
206
+ {
207
+ "title": "北京市人民政府_百度百科",
208
+ "link": "https://baike.baidu.com/item/%E5%8C%97%E4%BA%AC%E5%B8%82%E4%BA%BA%E6%B0%91%E6%94%BF%E5%BA%9C/8249521",
209
+ "snippet": "现任领导:第二十届中央委员,北京市委副书记,市人民政府市长、党组书记:殷勇。 北京市通州区运河东大街57号是目标地址。",
210
+ "position": 6
211
+ },
212
+ {
213
+ "title": "中共中央政治局委员、北京市委书记蔡奇调研清华大学并作报告",
214
+ "link": "https://www.tsinghua.edu.cn/info/1177/22525.htm",
215
+ "snippet": "清华新闻网11月28日电(记者赵姝婧)11月27日下午,中共中央政治局委员、北京市委书记蔡奇来到清华大学调研,并作主题为“贯彻落实习近平总书记对北京重要讲话精神,更加奋发有为 ...",
216
+ "position": 7
217
+ },
218
+ {
219
+ "title": "985校长,任北京市副市长! - 学术桥",
220
+ "link": "https://www.acabridge.cn/news/202403/t20240329_2590696.shtml",
221
+ "snippet": "北京日报消息,3月29日上午,北京市十六届人大常委会第九次会议表决,决定任命马骏、穆鹏为北京市副市长。 公开资料显示,马骏,男,汉族,1969年4月出生, ...",
222
+ "date": "Mar 29, 2024",
223
+ "position": 8
224
+ },
225
+ {
226
+ "title": "北京历任市长 - 财经- 搜狐",
227
+ "link": "https://business.sohu.com/20040918/n222116317.shtml",
228
+ "snippet": "1949年迄今,北京先后有过13任市长,他们是:叶剑英、聂荣臻、彭真、吴德、谢富治、林乎加、焦若愚、陈希同、李其炎、贾庆林、刘淇、孟学农、王岐山。 叶剑英",
229
+ "date": "Sep 18, 2004",
230
+ "position": 9
231
+ },
232
+ {
233
+ "title": "北京党政领导人物库 - 地方经济",
234
+ "link": "http://district.ce.cn/zt/rwk/sf/bj/index.shtml",
235
+ "snippet": "北京 ; 尹力, 市委书记 ; 殷勇, 市委副书记、市长 ; 游钧, 市委副书记、组织部部长 ; 陈健, 市委常委、市纪委书记,市监委主任 ; 夏林茂, 市委常委、市政府党组副书记、副市长.",
236
+ "attributes": {
237
+ "Missing": "是 | Show results with:是"
238
+ },
239
+ "position": 10
240
+ }
241
+ ],
242
+ "images": [
243
+ {
244
+ "title": "殷勇_市长_首都之窗_北京市人民政府门户网站",
245
+ "imageUrl": "https://www.beijing.gov.cn/gongkai/sld/szfld/sz/202210/W020230119731299534682.jpg",
246
+ "link": "https://www.beijing.gov.cn/gongkai/sld/szfld/sz/202210/t20221028_2847604.html"
247
+ },
248
+ {
249
+ "title": "市领导_首都之窗_北京市人民政府门户网站",
250
+ "imageUrl": "https://www.beijing.gov.cn/gongkai/sld/swld/swsj/202211/W020230119725612138202.jpg",
251
+ "link": "https://www.beijing.gov.cn/gongkai/sld/"
252
+ },
253
+ {
254
+ "title": "蔡奇当选北京市长张硕辅当选北京市监察委主任-中国新闻网",
255
+ "imageUrl": "http://www.chinanews.com/2017/0120/2017120133457.jpg",
256
+ "link": "https://www.chinanews.com.cn/m/gn/2017/01-20/8130471.shtml"
257
+ },
258
+ {
259
+ "title": "孙硕_副市长_首都之窗_北京市人民政府门户网站",
260
+ "imageUrl": "https://www.beijing.gov.cn/gongkai/sld/szfld/fsz/202407/W020240726400551421724.jpg",
261
+ "link": "https://www.beijing.gov.cn/gongkai/sld/szfld/fsz/202407/t20240726_3760113.html"
262
+ },
263
+ {
264
+ "title": "马骏_副市长_首都之窗_北京市人民政府门户网站",
265
+ "imageUrl": "https://www.beijing.gov.cn/gongkai/sld/szfld/fsz/202403/W020240329560640219878.jpg",
266
+ "link": "https://www.beijing.gov.cn/gongkai/sld/szfld/fsz/202403/t20240329_3605299.html"
267
+ },
268
+ {
269
+ "title": "靳伟_副市长_首都之窗_北京市人民政府门户网站",
270
+ "imageUrl": "https://www.beijing.gov.cn/gongkai/sld/szfld/fsz/202011/W020230119737138365717.jpg",
271
+ "link": "https://www.beijing.gov.cn/gongkai/sld/szfld/fsz/202011/t20201130_2153165.html"
272
+ },
273
+ {
274
+ "title": "殷勇当选北京市市长| 联合早报",
275
+ "imageUrl": "https://cassette.sphdigital.com.sg/image/zaobao/82cbc4ffc19f2f7229158de5188ce5475b6fcbbdfa63f765ea61c93373c4795d?o=zbimg&oloc=se",
276
+ "link": "https://www.zaobao.com.sg/realtime/china/story20230119-1354938"
277
+ },
278
+ {
279
+ "title": "殷勇(1969年) - 维基百科,自由的百科全书",
280
+ "imageUrl": "https://upload.wikimedia.org/wikipedia/commons/e/ee/Ambassador_Burns_speaks_with_Beijing_Mayor_Yin_Yong.png",
281
+ "link": "https://zh.wikipedia.org/zh-cn/%E6%AE%B7%E5%8B%87_(1969%E5%B9%B4)"
282
+ },
283
+ {
284
+ "title": "薄熙来- 维基百科,自由的百科全书",
285
+ "imageUrl": "https://upload.wikimedia.org/wikipedia/commons/8/80/Bo-_Ferrero-Waldner_meeting_%282007%29.jpg",
286
+ "link": "https://zh.wikipedia.org/zh-cn/%E8%96%84%E7%86%99%E6%9D%A5"
287
+ }
288
+ ],
289
+ "peopleAlsoAsk": [
290
+ {
291
+ "question": "市委书记和市长谁大?",
292
+ "snippet": "由于中华人民共和国政府各级行政机构实行党委负责制,市委书记是所在市的实际最高负责人,地位在市人大选出的市长之上。",
293
+ "title": "市委书记- 维基百科,自由的百科全书",
294
+ "link": "https://zh.wikipedia.org/zh-cn/%E5%B8%82%E5%A7%94%E6%9B%B8%E8%A8%98"
295
+ },
296
+ {
297
+ "question": "北京市区长是什么级别?",
298
+ "snippet": "区委书记低于市委书记,区长是一个区人民政府的最高领导人,其地位仅次于该区党委最高领导人区委书记,一般区长也兼任该区的区(县)委副书记,区长相当于县长。",
299
+ "title": "区委书记- 维基百科,自由的百科全书",
300
+ "link": "https://zh.wikipedia.org/zh-cn/%E5%8C%BA%E5%A7%94%E4%B9%A6%E8%AE%B0"
301
+ },
302
+ {
303
+ "question": "殷勇的背景是什么?",
304
+ "snippet": "殷勇是湖北武汉蔡甸人。 1987年,他考入清华大学自动化系。 1992年,殷勇本科毕业,被推荐到清华大学经济管理学院直接攻读博士学位,师从中国系统工程学科的开创人之一、清华大学工业自动化专业和系统工程专业创办者郑维敏教授,与中国人民银行原行长周小川师出同门。",
305
+ "title": "殷勇(1969年) - 维基百科,自由的百科全书",
306
+ "link": "https://zh.wikipedia.org/zh-hans/%E6%AE%B7%E5%8B%87_(1969%E5%B9%B4)"
307
+ },
308
+ {
309
+ "question": "北京市有市长吗?",
310
+ "snippet": "市政府领导 殷勇,男,汉族,1969年8月生,研究生,工学博士,中共党员。 现任二十届中央委员,十四届全国人大代表,北京市委副书记,市政府党组书记、市长。",
311
+ "title": "殷勇_市长_首都之窗 - 北京市人民政府",
312
+ "link": "https://www.beijing.gov.cn/gongkai/sld/szfld/sz/202210/t20221028_2847604.html"
313
+ }
314
+ ],
315
+ "relatedSearches": [
316
+ {
317
+ "query": "北京市委书记"
318
+ },
319
+ {
320
+ "query": "北京市长什么级别"
321
+ },
322
+ {
323
+ "query": "北京市副市长被查"
324
+ },
325
+ {
326
+ "query": "北京市书记"
327
+ },
328
+ {
329
+ "query": "上海市长"
330
+ },
331
+ {
332
+ "query": "北京市政府"
333
+ },
334
+ {
335
+ "query": "北京市历任市长"
336
+ },
337
+ {
338
+ "query": "北京市前市长"
339
+ }
340
+ ],
341
+ "credits": 2
342
+ },
343
+ "纸牌屋 最终季首播年份": {
344
+ "searchParameters": {
345
+ "q": "纸牌屋 最终季首播年份",
346
+ "type": "search",
347
+ "num": 11,
348
+ "mkt": "en-US",
349
+ "setLang": "en",
350
+ "engine": "google"
351
+ },
352
+ "answerBox": {
353
+ "snippet": "《纸牌屋第二季》于2014年2月14日播出。 《纸牌屋第三季》于2015年2月27日播出。 《纸牌屋第四季》于2016年3月4日在美国播出,《纸牌屋第五季》于2017年5月30日在美国播出,最终季《纸牌屋第六季》于2018年11月2日在美国Netflix播出。",
354
+ "snippetHighlighted": [
355
+ "2018年11月2日"
356
+ ],
357
+ "title": "纸牌屋_百度百科",
358
+ "link": "https://baike.baidu.com/item/%E7%BA%B8%E7%89%8C%E5%B1%8B/75728"
359
+ },
360
+ "organic": [
361
+ {
362
+ "title": "纸牌屋第六季_百度百科",
363
+ "link": "https://baike.baidu.com/item/%E7%BA%B8%E7%89%8C%E5%B1%8B%E7%AC%AC%E5%85%AD%E5%AD%A3/22460232",
364
+ "snippet": "于2018年11月2日在美国Netflix播出。 讲述了美国政治家弗兰克·麦卡锡和他同样野心勃勃的妻子卡罗尔·麦卡锡在华盛顿特区白宫中运作权力的故事。 中文名. 纸牌屋第六季.",
365
+ "position": 1
366
+ },
367
+ {
368
+ "title": "纸牌屋(美国剧集) - 维基百科,自由的百科全书",
369
+ "link": "https://zh.wikipedia.org/zh-hans/%E7%B4%99%E7%89%8C%E5%B1%8B_(%E7%BE%8E%E5%9C%8B%E5%8A%87%E9%9B%86)",
370
+ "snippet": "两部剧集都是基于迈克尔·多布斯同名小说创作的。第一季全部13集于2013年2月1日在流媒体网站奈飞首播。 ... 第二季的13集于2014年2月14日全部释出。 ... 2014年2月4日,即第二季 ...",
371
+ "position": 2
372
+ },
373
+ {
374
+ "title": "纸牌屋(第六季) - 维基百科",
375
+ "link": "https://zh.wikipedia.org/zh-hans/%E7%B4%99%E7%89%8C%E5%B1%8B_(%E7%AC%AC%E5%85%AD%E5%AD%A3)",
376
+ "snippet": "美国政治网络电视剧《纸牌屋》(英语:House of Cards)的第六季于2017年12月4日被Netflix续订,并安排于2018年11月2日播出。不同于之前由十三集组成的季度,第六季只有八集 ...",
377
+ "position": 3
378
+ },
379
+ {
380
+ "title": "纸牌屋第六季 - 抖音百科",
381
+ "link": "https://m.baike.com/wikiid/7714418908910672668",
382
+ "snippet": "《纸牌屋第六季》(House of Cards Season 6)是一部由奈飞公司(Netflix)出品的美国政治题材电视剧,是纸牌屋系列的最终季。该剧于2018年11月2日在美国Netflix播出,",
383
+ "position": 4
384
+ },
385
+ {
386
+ "title": "《纸牌屋》发布第六季片花女总统独挑最终季大梁",
387
+ "link": "https://www.huijiwiki.com/wiki/%E6%96%B0%E9%97%BB:2018/03/06/%E3%80%8A%E7%BA%B8%E7%89%8C%E5%B1%8B%E3%80%8B%E5%8F%91%E5%B8%83%E7%AC%AC%E5%85%AD%E5%AD%A3%E7%89%87%E8%8A%B1_%E5%A5%B3%E6%80%BB%E7%BB%9F%E7%8B%AC%E6%8C%91%E6%9C%80%E7%BB%88%E5%AD%A3%E5%A4%A7%E6%A2%81",
388
+ "snippet": "2013年,美剧《纸牌屋》的首播引起不小轰动,这部讲述美国政坛权力的游戏的剧集以其人性的深度刻画、揭露政坛的另一面和偶尔的黑色幽默桥段获得了无数人的 ...",
389
+ "date": "Mar 6, 2018",
390
+ "position": 5
391
+ },
392
+ {
393
+ "title": "纸牌屋第六季House of Cards Season 6 (2018) - 豆瓣电影",
394
+ "link": "https://movie.douban.com/subject/27185590//",
395
+ "snippet": "首播: 2018-11-02(美国) 集数: 8 单集片长: 55分钟 又名: 纸牌屋最终季/ 众议院要人/ House of Cards Final season IMDb: tt7538918. 豆瓣评分. 引用. 5.6. 18073人评价. 5 ...",
396
+ "rating": 5.6,
397
+ "ratingMax": 10,
398
+ "ratingCount": 18073,
399
+ "position": 6
400
+ },
401
+ {
402
+ "title": "捍卫自己的命运!Netflix放出《纸牌屋》最终季正式预告 - 机核",
403
+ "link": "https://www.gcores.com/articles/102742",
404
+ "snippet": "导语:今晚,Netflix 放出了《纸牌屋》最终季正式预告。本季剧集将于11月2日在官网全面上线。 《纸牌屋》最终季. 自2013年首播以来,由Netflix 出品的《纸牌屋》一直备受观众 ...",
405
+ "position": 7
406
+ },
407
+ {
408
+ "title": "纸牌屋_ 百科",
409
+ "link": "https://baike.weixin.qq.com/v167169186.htm?baike_inner=tv",
410
+ "snippet": "播出时间2018年11月2日. 集数13集. 剧情简介. 本季将是全剧季 ... 2018年7月4日,《纸牌屋》最终季发布了第二支宣传视频。 2018年8月,《纸牌屋》第六季发布最新剧照。",
411
+ "position": 8
412
+ },
413
+ {
414
+ "title": "Kevin Spacey 彻底“消失”,《纸牌屋》最终季将于2018 年开拍",
415
+ "link": "https://nowre.com/lifestyle/283319/kevin-spacey-chedi-xiaoshizhipaiwuzuizhongjijiangyu-2018-niankaipai/",
416
+ "snippet": "Kevin Spacey 彻底“消失”,《纸牌屋》最终季将于2018 年开拍 ... 第一夫人成为绝对主角。 Kevin Spacey 遭到Netflix 封杀后,《纸牌屋》的最终季何时才会启动 ...",
417
+ "date": "Dec 5, 2017",
418
+ "position": 9
419
+ },
420
+ {
421
+ "title": "弗兰克最后的结局- 喜马拉雅手机版",
422
+ "link": "https://m.ximalaya.com/ask/q11091050",
423
+ "snippet": "《纸牌屋第三季》于2015年2月27日播出。 《纸牌屋第四季》于2016年3月4日在美国播出《纸牌屋第五季》于2017年5月30日在美国播出,最终季《纸牌屋第六季》于 ...",
424
+ "date": "Nov 26, 2023",
425
+ "position": 10
426
+ }
427
+ ],
428
+ "peopleAlsoAsk": [
429
+ {
430
+ "question": "纸牌屋第六季弗兰克是怎么死的?",
431
+ "snippet": "在《纸牌屋》第六季的预告片中,显示安德伍德于2017年去世,他被安葬在位于南卡罗来纳州的父亲的墓旁。 该系列结局也显示,他的得力助手道格·斯坦普(迈克尔·凯利饰演)毒杀了他,以阻止他杀死不肯赦免自己的克莱尔。",
432
+ "title": "弗兰克·安德伍德 - 维基百科",
433
+ "link": "https://zh.wikipedia.org/zh-cn/%E5%BC%97%E5%85%B0%E5%85%8B%C2%B7%E5%AE%89%E5%BE%B7%E4%BC%8D%E5%BE%B7"
434
+ },
435
+ {
436
+ "question": "纸牌屋第六季有几集?",
437
+ "snippet": "美国政治网络电视剧《纸牌屋》(英语:House of Cards)的第六季于2017年12月4日被Netflix续订,并安排于2018年11月2日播出。 不同于之前由十三集组成的季度,第六季只有八集。 本季不会包括由于遭受性侵指控而被解雇的前主角凯文·史派西。",
438
+ "title": "纸牌屋(第六季) - 维基百科",
439
+ "link": "https://zh.wikipedia.org/zh-hans/%E7%B4%99%E7%89%8C%E5%B1%8B_(%E7%AC%AC%E5%85%AD%E5%AD%A3)"
440
+ },
441
+ {
442
+ "question": "纸牌屋完结了吗?",
443
+ "snippet": "《纸牌屋第六季》(House of Cards Season 6)是由奈飞公司(Netflix)出品的政治题材电视剧,是美剧《纸牌屋》系列的第六季,也是该系列最终季。",
444
+ "title": "纸牌屋第六季_百度百科",
445
+ "link": "https://baike.baidu.com/item/%E7%BA%B8%E7%89%8C%E5%B1%8B%E7%AC%AC%E5%85%AD%E5%AD%A3/22460232"
446
+ },
447
+ {
448
+ "question": "纸牌屋一共有几季?",
449
+ "snippet": "纸牌屋(美国剧集)\n纸牌屋 House of Cards\n\n国家/地区\n美国\n语言\n英语\n季数\n6\n集数\n73(每集列表)",
450
+ "title": "纸牌屋(美国剧集) - 维基百科,自由的百科全书",
451
+ "link": "https://zh.wikipedia.org/zh-hans/%E7%B4%99%E7%89%8C%E5%B1%8B_(%E7%BE%8E%E5%9C%8B%E5%8A%87%E9%9B%86)"
452
+ }
453
+ ],
454
+ "relatedSearches": [
455
+ {
456
+ "query": "纸牌屋第七季"
457
+ },
458
+ {
459
+ "query": "纸牌屋第六季"
460
+ },
461
+ {
462
+ "query": "纸牌屋第一季"
463
+ },
464
+ {
465
+ "query": "纸牌屋第三季"
466
+ },
467
+ {
468
+ "query": "纸牌屋第五季"
469
+ },
470
+ {
471
+ "query": "纸牌屋弗兰克怎么死的"
472
+ },
473
+ {
474
+ "query": "纸牌屋第六季在线"
475
+ },
476
+ {
477
+ "query": "纸牌屋第二季"
478
+ }
479
+ ],
480
+ "credits": 2
481
+ },
482
+ "2018平昌冬奥会男子短道速滑500米破世界纪录运动员": {
483
+ "searchParameters": {
484
+ "q": "2018平昌冬奥会男子短道速滑500米破世界纪录运动员",
485
+ "type": "search",
486
+ "num": 11,
487
+ "mkt": "en-US",
488
+ "setLang": "en",
489
+ "engine": "google"
490
+ },
491
+ "answerBox": {
492
+ "snippet": "2018年2月,平昌冬奥短道速滑男子500米决赛,武大靖打破世界纪录并夺冠,为中国赢得平昌冬奥首枚金牌;2月25日,担任平昌冬奥会闭幕式中国代表团旗手;12月,被新华社体育部评为2018年国际十佳运动员。 2019年1月,任中国奥委会委员;9月,获第七届全国道德模范“全国敬业奉献模范”。",
493
+ "snippetHighlighted": [
494
+ "武大靖"
495
+ ],
496
+ "title": "武大靖_百度百科",
497
+ "link": "https://baike.baidu.com/item/%E6%AD%A6%E5%A4%A7%E9%9D%96/5383910"
498
+ },
499
+ "organic": [
500
+ {
501
+ "title": "武大靖- 维基百科,自由的百科全书",
502
+ "link": "https://zh.wikipedia.org/zh-hans/%E6%AD%A6%E5%A4%A7%E9%9D%96",
503
+ "snippet": "2018年平昌冬奥会,武大靖在男子500米预赛第一小组出场,并以40.264秒的成绩刷新奥运会纪录,在1/4决赛中又以39秒800打破2012年由杰·亚·塞斯基创下的世界记录,成为历史上第二 ...",
504
+ "position": 1
505
+ },
506
+ {
507
+ "title": "一天两破世界纪录!武大靖夺中国代表团平昌冬奥首金",
508
+ "link": "https://www.chinanews.com/m/ty/2018/02-22/8452267.shtml",
509
+ "snippet": "在平昌冬奥会短道速滑男子500米的决赛中,现世界排名第一的中国选手武大靖以创世界纪录的39秒584获得冠军。",
510
+ "date": "Feb 22, 2018",
511
+ "position": 2
512
+ },
513
+ {
514
+ "title": "武大靖击碎世界纪录历史新篇舍我其谁-2018平昌冬季奥运会",
515
+ "link": "https://www.sport.gov.cn/n4/n14896/n14902/c848367/content.html",
516
+ "snippet": "滑完半决赛后,我觉得体能已经耗尽了。”半决赛,武大靖的成绩只有40.087秒。 男子500米A组决赛在武大靖、加拿大人吉拉德以及两名韩国选手黄大恒、林孝俊之间 ...",
517
+ "date": "Feb 23, 2018",
518
+ "position": 3
519
+ },
520
+ {
521
+ "title": "再破世界纪录!武大靖豪取短道500米赛季三连冠 - 中国新闻网",
522
+ "link": "https://m.chinanews.com/wap/detail/chs/zw/8674553.shtml",
523
+ "snippet": "平昌奥运周期中,武大靖的速度不断跃升。2018年初,武大靖在平昌冬奥会短道速滑男子500米半决赛和决赛相继打破世界纪录,并最终夺魁,为中国短道队贡献了当届冬奥会上的唯一 ...",
524
+ "position": 4
525
+ },
526
+ {
527
+ "title": "平昌冬奥会短道速滑男子500米:武大靖破世界纪录夺冠【5】 - 人民网",
528
+ "link": "http://world.people.com.cn/n1/2018/0223/c1002-29830304-5.html",
529
+ "snippet": "2月22日晚,2018平昌冬奥会短道速滑男子500米决赛上演,中国选手武大靖一骑绝尘,没有给韩国队任何机会,以39秒584的成绩再次刷新自己刚创造的世界纪录和 ...",
530
+ "date": "Feb 23, 2018",
531
+ "position": 5
532
+ },
533
+ {
534
+ "title": "2018年冬季奥林匹克运动会短道速滑男子500米比赛 - 维基百科",
535
+ "link": "https://zh.wikipedia.org/zh-hans/2018%E5%B9%B4%E5%86%AC%E5%AD%A3%E5%A5%A5%E6%9E%97%E5%8C%B9%E5%85%8B%E8%BF%90%E5%8A%A8%E4%BC%9A%E7%9F%AD%E9%81%93%E9%80%9F%E6%BB%91%E7%94%B7%E5%AD%90500%E7%B1%B3%E6%AF%94%E8%B5%9B",
536
+ "snippet": "最终,由中国选手武大靖以破世界纪录的成绩夺冠,这是中国代表团在本届冬奥会上的首枚金牌及唯一一块金牌。 韩��选手黄大宪和林孝俊分获银牌和铜牌。",
537
+ "position": 6
538
+ },
539
+ {
540
+ "title": "燃爆!武大靖!一年两破世界纪录! - China Daily",
541
+ "link": "http://china.chinadaily.com.cn/2018-11/12/content_37246243.htm",
542
+ "snippet": "2018年2月22日,平昌冬奥短道速滑男子500米决赛,武大靖以39秒584的成绩打破世界纪录强势夺冠,为中国赢得平昌冬奥首枚金牌,也是中国男子短道速滑队 ...",
543
+ "date": "Nov 12, 2018",
544
+ "position": 7
545
+ },
546
+ {
547
+ "title": "2018平昌冬奥会短道速滑男子500米:武大靖破世界纪录夺冠",
548
+ "link": "http://m.cnr.cn/jdt/20180223/t20180223_524141044.html",
549
+ "snippet": "2月22日晚,2018平昌冬奥会短道速滑男子500米决赛上演,中国选手武大靖一骑绝尘,没有给韩国队任何机会,以39秒584的成绩再次刷新自己刚创造的世界纪录和 ...",
550
+ "date": "Feb 23, 2018",
551
+ "position": 8
552
+ },
553
+ {
554
+ "title": "燃爆了!中国短道速滑名将武大靖一年两破世界纪录 - 央视网",
555
+ "link": "http://news.cctv.com/2018/11/12/ARTIkv6OahnNVbHJ3vk6szOt181112.shtml",
556
+ "snippet": "2018年2月22日,平昌冬奥短道速滑男子500米决赛,武大靖以39秒584的成绩打破世界纪录强势夺冠,为中国赢得平昌冬奥首枚金牌,也是中国男子短道速滑队在冬季 ...",
557
+ "date": "Nov 12, 2018",
558
+ "position": 9
559
+ },
560
+ {
561
+ "title": "武大靖两破500米世界纪录夺冠中国军团获本届冬奥首金 - 人民政协网",
562
+ "link": "https://www.rmzxw.com.cn/c/2018-02-23/1964457.shtml?n2m=1",
563
+ "snippet": "39. 584秒,当武大靖滑过终点时,他在今天第二次创造了短道速滑男子500米世界纪录,同时也为中国体育代表团带来了本届冬奥会的首枚金牌。",
564
+ "date": "Feb 23, 2018",
565
+ "position": 10
566
+ }
567
+ ],
568
+ "relatedSearches": [
569
+ {
570
+ "query": "武大靖老婆"
571
+ },
572
+ {
573
+ "query": "武大靖微博"
574
+ },
575
+ {
576
+ "query": "武大靖演戏"
577
+ },
578
+ {
579
+ "query": "武大靖滑雪"
580
+ },
581
+ {
582
+ "query": "武大靖噓國王在冬眠"
583
+ },
584
+ {
585
+ "query": "王濛"
586
+ },
587
+ {
588
+ "query": "武大靖王鶴棣"
589
+ },
590
+ {
591
+ "query": "许宏志"
592
+ }
593
+ ],
594
+ "credits": 2
595
+ },
596
+ "武大靖 2023年 客串电影 角色": {
597
+ "searchParameters": {
598
+ "q": "武大靖 2023年 客串电影 角色",
599
+ "type": "search",
600
+ "num": 11,
601
+ "mkt": "en-US",
602
+ "setLang": "en",
603
+ "engine": "google"
604
+ },
605
+ "organic": [
606
+ {
607
+ "title": "武大靖真实演绎自己,冰雪题材剧集《嘘,国王在冬眠》热播 - 大洋新闻",
608
+ "link": "https://news.dayoo.com/gzrbrmt/202503/18/170636_54800616.htm",
609
+ "snippet": "《嘘,国王在冬眠》邀请数十位运动员参与拍摄及动作指导,奥运冠军武大靖加盟客串。 除日常围读剧本外,开机前组织主要演员雪场集中训练,以冰雪运动传递年轻 ...",
610
+ "date": "Mar 18, 2025",
611
+ "position": 1
612
+ },
613
+ {
614
+ "title": "武大靖- 維基百科,自由的百科全書",
615
+ "link": "https://zh.wikipedia.org/zh-hant/%E6%AD%A6%E5%A4%A7%E9%9D%96",
616
+ "snippet": "2020 《我在北京等你》 (客串); 2020 《冰糖燉雪梨》 (客串); 2023 《志願軍:雄兵出擊》 (客串,飾演楊根思); 2025 《噓,國王在冬眠》(客串). 參考資料. 編輯. ^ 奥运冠军 ...",
617
+ "attributes": {
618
+ "Missing": "电影 角色"
619
+ },
620
+ "position": 2
621
+ },
622
+ {
623
+ "title": "[中国电影报道]武大靖客串出演《嘘,国王在冬眠》 推广冰雪运动",
624
+ "link": "https://tv.cctv.com/2025/03/24/VIDEVvydehAVIcsMWkgDnZxf250324.shtml",
625
+ "snippet": "武大靖客串出演《嘘,国王在冬眠》,推广冰雪运动。",
626
+ "date": "Mar 24, 2025",
627
+ "attributes": {
628
+ "Missing": "2023 | Show results with:2023"
629
+ },
630
+ "position": 3
631
+ },
632
+ {
633
+ "title": "我校在第六届“我心中的思政课”全国高校大学生 - 马克思主义学院2023",
634
+ "link": "https://www.bipt.edu.cn/pub/mkszyxy/jxky/zyzs/6880dd503dc746dfa74c27419b9ce8ed.htm",
635
+ "snippet": "马克思主义学院冷文勇、吴爱萍、李齐、李建华、武靖茗等五位老师指导微电影制作,王佳雨、黄蓝薇、王曦嘉、尹沛郁、肖一弓等17名学生参演了微电影,李齐老师客串了片中角色。",
636
+ "position": 4
637
+ },
638
+ {
639
+ "title": "虞书欣、林一甜蜜“撒糖”,《嘘,国王在冬眠》能否超越《难哄》?",
640
+ "link": "https://www.163.com/dy/article/JQD2B2Q7053469KC.html",
641
+ "snippet": "... 武大靖加盟客串。除日常围读剧本外,开机前组织主要演员雪场集中训练,响应“三亿人上冰雪”号召,以冰雪运动传递年轻人生活新方式。 服化道上,主创 ...",
642
+ "date": "Mar 11, 2025",
643
+ "position": 5
644
+ },
645
+ {
646
+ "title": "武大靖- 维基百科,自由的百科全书",
647
+ "link": "https://zh.wikipedia.org/zh-cn/%E6%AD%A6%E5%A4%A7%E9%9D%96",
648
+ "snippet": "武大靖(1994年7月24日—)黑龙江省佳木斯市人,中国男子短道速滑队运动员。短 ... 2023 《志愿军:雄兵出击》 (客串,饰演杨根思); 2025 《嘘,国王在冬眠》(客串) ...",
649
+ "attributes": {
650
+ "Missing": "电影 角色"
651
+ },
652
+ "position": 6
653
+ },
654
+ {
655
+ "title": "武大靖 - 抖音百科",
656
+ "link": "https://m.baike.com/wikiid/7158714449493901325",
657
+ "snippet": "武大靖,1994年7月24日出生于黑龙江省佳木斯市,中国男子短道速滑国家队运动员,短道速滑男子500米世界纪录、奥运纪录保持者、吉林省体育局冰上运动管理 ...",
658
+ "date": "Jan 5, 2025",
659
+ "position": 7
660
+ },
661
+ {
662
+ "title": "杨紫嫣_百度百科",
663
+ "link": "https://baike.baidu.hk/item/%E6%A5%8A%E7%B4%AB%E5%AB%A3/16926549",
664
+ "snippet": "杨紫嫣,1976年4月16日出生于北京,中国大陆演员,毕业于中央戏剧学院表演系。 1994年,因在武侠剧《书剑恩仇录》中饰演香香公主而出道。2011年,在宫廷剧《甄嬛传》中饰演 ...",
665
+ "position": 8
666
+ },
667
+ {
668
+ "title": "苏有朋_百度百科",
669
+ "link": "https://baike.baidu.com/item/%E8%8B%8F%E6%9C%89%E6%9C%8B/294596",
670
+ "snippet": "苏有朋,1973年9月11日出生于台湾省台北市,1991年,考入台湾大学机械工程系。中国台湾影视男演员、歌手、电影导演、制片人。1988年,作为小虎队最小成员“乖乖虎” ...",
671
+ "position": 9
672
+ }
673
+ ],
674
+ "images": [
675
+ {
676
+ "title": "专访丨奥运冠军武大靖:出演电视剧,台词都是真实经历- YouTube",
677
+ "imageUrl": "https://i.ytimg.com/vi/6fURJC8TrZg/sddefault.jpg",
678
+ "link": "https://www.youtube.com/watch?v=6fURJC8TrZg"
679
+ },
680
+ {
681
+ "title": "冰上逐梦18年,三战冬奥,两届夺金!专访中国男子短道速滑队运动员武大 ...",
682
+ "imageUrl": "https://i.ytimg.com/vi/tHmV4cE1QB4/maxresdefault.jpg",
683
+ "link": "https://www.youtube.com/watch?v=tHmV4cE1QB4"
684
+ },
685
+ {
686
+ "title": "武大靖是真的会飞檐走壁吧#全员加速中2023 #shorts 【咪咕MiGu官方频道 ...",
687
+ "imageUrl": "https://i.ytimg.com/vi/eoTf4yGkj68/maxres2.jpg?sqp=-oaymwEoCIAKENAF8quKqQMcGADwAQH4AbYIgAKAD4oCDAgAEAEYfyAvKC0wDw==&rs=AOn4CLAxiGqPg0b6eIs4XfxJZpFWANJImQ",
688
+ "link": "https://www.youtube.com/watch?v=eoTf4yGkj68"
689
+ },
690
+ {
691
+ "title": "中国电影报道]武大靖客串出演《嘘,国王在冬眠》 推广冰雪运动",
692
+ "imageUrl": "https://p4.img.cctvpic.com/fmspic/2025/03/24/4a9bc7d6a98c49f19af8dc3b01aa3d46-1.jpg",
693
+ "link": "https://tv.cctv.com/2025/03/24/VIDEVvydehAVIcsMWkgDnZxf250324.shtml"
694
+ },
695
+ {
696
+ "title": "武大靖“本色” 客串《嘘,国王在冬眠》,讲述真实经历(2025) 全集带字幕 ...",
697
+ "imageUrl": "http://m.iqiyipic.com/u2/image/20250317/7b/0e/pv_1086633913887100_d_601_480_270.jpg",
698
+ "link": "https://www.iq.com/album/%E6%AD%A6%E5%A4%A7%E9%9D%96-%E2%80%9C%E6%9C%AC%E8%89%B2%E2%80%9D-%E5%AE%A2%E4%B8%B2%E3%80%8A%E5%98%98-%E5%9B%BD%E7%8E%8B%E5%9C%A8%E5%86%AC%E7%9C%A0%E3%80%8B-%E8%AE%B2%E8%BF%B0%E7%9C%9F%E5%AE%9E%E7%BB%8F%E5%8E%86-2025-a5a0t1ao7k?lang=zh_cn"
699
+ },
700
+ {
701
+ "title": "冰糖炖雪梨》将开播武大靖客串盼推广冰雪运动_吉林频道_凤凰网",
702
+ "imageUrl": "http://x0.ifengimg.com/cmpp/2020_12/e67ed27d0acbf83_size66_w700_h465.jpg",
703
+ "link": "http://jl.ifeng.com/a/20200320/13953081_0.shtml"
704
+ },
705
+ {
706
+ "title": "榜样7》| 武大靖:冰上竞技当仁不让为国争光初心不改_共产党员网",
707
+ "imageUrl": "https://p5.img.cctvpic.com/photoworkspace/contentimg/2023/03/25/2023032519580742463.jpg",
708
+ "link": "https://www.12371.cn/2023/03/25/VIDE1679736123173822.shtml"
709
+ },
710
+ {
711
+ "title": "榜样7》| 武大靖:冰上竞技当仁不让为国争光初心不改_共产党员网",
712
+ "imageUrl": "https://p1.img.cctvpic.com/photoworkspace/contentimg/2023/03/25/2023032519582757793.jpg",
713
+ "link": "https://www.12371.cn/2023/03/25/VIDE1679736123173822.shtml"
714
+ },
715
+ {
716
+ "title": "武大靖真实演绎自己,冰雪题材剧集《嘘,国王在冬眠》热播_广州日报大洋网",
717
+ "imageUrl": "https://dayooimg.dayoo.com/gzrbrmt/img/202503/18/54800616_e6fb6d55-cfef-4af8-816c-041df8bbe3aa.jpg",
718
+ "link": "https://news.dayoo.com/gzrbrmt/202503/18/170636_54800616.htm"
719
+ }
720
+ ],
721
+ "relatedSearches": [
722
+ {
723
+ "query": "Ren Ziwei"
724
+ },
725
+ {
726
+ "query": "武大靖退役"
727
+ },
728
+ {
729
+ "query": "武大靖結婚"
730
+ },
731
+ {
732
+ "query": "武大靖客串"
733
+ },
734
+ {
735
+ "query": "武大靖滑雪"
736
+ },
737
+ {
738
+ "query": "韓天宇"
739
+ },
740
+ {
741
+ "query": "乘風破浪 2023"
742
+ },
743
+ {
744
+ "query": "中國 冬 奧運 動員"
745
+ },
746
+ {
747
+ "query": "王 濛 綜藝"
748
+ }
749
+ ],
750
+ "credits": 2
751
+ }
752
+ }
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/cache/url_cache.json ADDED
The diff for this file is too large to render. See raw diff
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/SimpleDeepSearcher_demo.py ADDED
@@ -0,0 +1,899 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # === 标准库导入(Python 内置模块) ===
2
+ import argparse
3
+ import asyncio
4
+ import json
5
+ import logging
6
+ import multiprocessing
7
+ import os
8
+ import re
9
+ import string
10
+ import time
11
+ from functools import partial
12
+ from logging.handlers import RotatingFileHandler
13
+ from typing import Optional, Tuple, List, Dict
14
+
15
+ # === 第三方库 ===
16
+ import httpx
17
+ import numpy as np
18
+ import streamlit as st
19
+ import torch
20
+ from tqdm import tqdm
21
+ from urllib.parse import urlparse
22
+ # === 第三方 Transformers & vLLM ===
23
+ from transformers import AutoTokenizer
24
+ from vllm import LLM, SamplingParams
25
+ from google_search import (
26
+ google_web_search,
27
+ extract_relevant_info,
28
+ fetch_page_content,
29
+ extract_snippet_with_context
30
+ )
31
+ # from evaluate import (
32
+ # run_evaluation,
33
+ # run_evaluation_for_eval,
34
+ # extract_answer
35
+ # )
36
+ from prompts import (
37
+ get_multiqa_instruction,
38
+ get_math_instruction,
39
+ get_task_instruction_openqa,
40
+ get_task_instruction_math,
41
+ get_webpage_to_reasonchain_instruction
42
+ )
43
+ from openai import OpenAI
44
+
45
+ from stage_wise_analysis import stage_wise_analysis
46
+
47
# Define special tokens.
# BEGIN/END_SEARCH_QUERY delimit a search query emitted by the reasoning
# model: END_SEARCH_QUERY is used as a generation stop string and the pair
# is scanned with extract_between() to pull out the query text.
# BEGIN/END_SEARCH_RESULT presumably wrap search results injected back into
# the prompt — their consumer is outside this view; TODO confirm.
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
52
+
53
+
54
+
55
# Application title (shown as the browser page title).
TITLE = "Chat with LLM"

# Configure the Streamlit page: wide layout, sidebar collapsed on load.
st.set_page_config(page_title=TITLE, layout="wide", initial_sidebar_state="collapsed")
60
+
61
+
62
+
63
def tokenize_prompt(history, query, model_path, multi_turn=False):
    """Render a chat prompt string using the model's chat template.

    Args:
        history: List of ``(question, response)`` pairs from earlier turns.
        query: The current user message.
        model_path: HF model path/name whose tokenizer supplies the template.
        multi_turn: When True, replay ``history`` before the current query.

    Returns:
        The untokenized prompt string with the generation prompt appended.
    """
    messages = []
    if multi_turn:
        for past_query, past_reply in history:
            messages.append({"role": "user", "content": past_query})
            messages.append({"role": "assistant", "content": past_reply})
    messages.append({"role": "user", "content": query})

    # NOTE(review): the tokenizer is re-loaded from disk on every call;
    # presumably acceptable for a demo, but a cache would avoid the cost.
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=False
    )
75
+
76
# Logging configuration
def setup_logging(log_file):
    """Route INFO-and-above records to a rotating file and to the console.

    The file handler caps the log at 5 MB per file and keeps 5 rotated
    backups; a stream handler mirrors everything to stderr.
    """
    file_handler = RotatingFileHandler(
        log_file, maxBytes=1024 * 1024 * 5, backupCount=5
    )
    console_handler = logging.StreamHandler()
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[file_handler, console_handler],
    )
86
+
87
+
88
def display_chat(chat):
    """Render one chat exchange: the user message, then each model's reply
    side by side, one Streamlit column per response.

    Args:
        chat: Dict with keys 'avatar' and 'input' for the user turn, and
              'response' — a list of dicts each carrying 'model', 'avatar',
              and 'text'.
    """
    with st.chat_message("user", avatar=chat["avatar"]):
        st.markdown(chat["input"], unsafe_allow_html=True)
    column_list = st.columns(len(chat["response"]))
    # zip pairs each response with its column; the enumerate index in the
    # original was never used, so it is dropped.
    for column, response in zip(column_list, chat["response"]):
        with column:
            st.header(response["model"])
            with st.chat_message("assistant", avatar=response["avatar"]):
                st.markdown(response["text"], unsafe_allow_html=True)
97
+
98
def extract_answer(output, mode='gen'):
    """Extract the final answer from a model generation.

    Args:
        output: Raw model text; ``None`` is tolerated (treated as "None").
        mode: Extraction strategy:
            - 'codegen': content of the last ```` ```python ... ``` ```` block.
            - 'infogen': text following "**Final Information**" (newlines
              collapsed) or "**Modified Reasoning Steps**"; the whole output
              when neither marker is present.
            - anything else ('gen', 'choose', 'qa'): content of the last
              ``\\boxed{...}``; for 'choose'/'qa' a ``\\text{...}`` wrapper is
              unwrapped and surrounding parentheses are stripped.

    Returns:
        The extracted answer string, or '' when nothing matched.
    """
    text = "None" if output is None else output

    if mode == 'codegen':
        # The last fenced python block wins.
        blocks = re.findall(r'```python\s*(.*?)\s*```', text, re.DOTALL | re.IGNORECASE)
        return blocks[-1].strip() if blocks else ''

    if mode == 'infogen':
        # Reasoning generated from webpage content.
        info_marker = "**Final Information**"
        step_marker = "**Modified Reasoning Steps**"
        if info_marker in text:
            # Newlines are collapsed only on this path (matches original).
            return text.split(info_marker)[-1].replace("\n", "").strip("```").strip()
        if step_marker in text:
            return text.split(step_marker)[-1].strip("```").strip()
        # No marker found: fall back to returning the raw output.
        return text

    # Default ('gen'/'choose'/'qa'): take the last \boxed{...} expression.
    boxed = re.findall(r'\\boxed\{(.*)\}', text)
    if not boxed:
        return ''
    answer = boxed[-1]
    if mode in ('choose', 'qa'):
        wrapped = re.findall(r'\\text\{(.*)\}', answer)
        if wrapped:
            answer = wrapped[-1]
        answer = answer.strip("()")
    return answer
135
+
136
+ # def generate_favicon_html(urls):
137
+ # favicons = []
138
+ # for url in urls:
139
+ # domain = urlparse(url).netloc
140
+ # favicon_url = f"https://{domain}/favicon.ico"
141
+ # favicons.append(f'<img src="{favicon_url}" width="16" style="margin-right:4px;">')
142
+ # return "**🌐 Fetching web pages:** " + "".join(favicons)
143
+
144
def generate_favicon_html(urls):
    """Build an HTML strip of 16x16 favicons for the given URLs.

    Uses Google's S2 favicon service (``sz=16`` requests a 16x16 icon)
    instead of probing each site's /favicon.ico directly; icons that fail
    to load hide themselves via the inline ``onerror`` handler.
    """
    icon_tags = [
        f'<img src="https://www.google.com/s2/favicons?domain={urlparse(url).netloc}&sz=16" width="16" style="margin-right:4px;" alt=" " onerror="this.style.display=\'none\'">'
        for url in urls
    ]
    return "**🌐 Fetching web pages:** " + "".join(icon_tags)
153
+
154
def webpage_analysis_single(summ_model_url, summ_model_path, prompt) -> str:
    """Ask the summarization model to analyze one webpage prompt.

    Retries up to 10 times on any exception (sleeping 1 s between
    attempts) and returns the literal string "None" if every attempt
    fails, so callers can count failures without crashing.
    """
    summ_client = OpenAI(
        base_url=summ_model_url,
        api_key="EMPTY"
    )
    for _ in range(10):  # max retry 10 times
        try:
            completion = summ_client.chat.completions.create(
                model=summ_model_path,
                max_tokens=8192,
                temperature=0.6,
                top_p=0.95,
                messages=[prompt],
            )
        except Exception as err:
            logging.info(err)
            time.sleep(1)
        else:
            return completion.choices[0].message.content
    return "None"
174
+
175
+
176
+
177
+ async def fetch_model_response(widget, history, question, model_config, response_dict, **kwargs):
178
+ filtered_data = [{"Question": question, "Answer": ""}]
179
+
180
+ # temperature = args.temperature
181
+ # top_p = args.top_p
182
+ # top_k_sampling = args.top_k_sampling
183
+ # max_tokens = args.max_tokens
184
+
185
+
186
+ dataset_name = "user_queries"
187
+ subset_num = -1
188
+ MAX_SEARCH_LIMIT = 10
189
+ MAX_TURN = 15
190
+ top_k = 10
191
+ max_doc_len = 3000
192
+ summ_model_path = "/capacity/userdata/models/QwQ-32B"
193
+ summ_model_url = "http://localhost:8003/v1"
194
+ google_subscription_key = "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d"
195
+ google_endpoint = "https://google.serper.dev/search"
196
+ cache_dir = "../cache"
197
+ output_dir = "output"
198
+ is_exclude_urls = False
199
+ max_tokens = 20480
200
+
201
+
202
+ search_cache_path = os.path.join(cache_dir, 'search_cache.json')
203
+ url_cache_path = os.path.join(cache_dir, 'url_cache.json')
204
+ with open(search_cache_path, 'r', encoding='utf-8') as f:
205
+ search_cache = json.load(f)
206
+ with open(url_cache_path, 'r', encoding='utf-8') as f:
207
+ url_cache = json.load(f)
208
+
209
+ widget_output = ""
210
+
211
+
212
+ # Function to save caches
213
+ def save_caches():
214
+ with open(search_cache_path, 'w', encoding='utf-8') as f:
215
+ json.dump(search_cache, f, ensure_ascii=False, indent=2)
216
+ with open(url_cache_path, 'w', encoding='utf-8') as f:
217
+ json.dump(url_cache, f, ensure_ascii=False, indent=2)
218
+
219
+ def generate_webpage_to_reasonchain_batch(
220
+ original_questions: List[str],
221
+ prev_reasonings: List[str],
222
+ search_queries: List[str],
223
+ documents: List[str],
224
+ summ_model_url: OpenAI,
225
+ summ_model_path: str,
226
+ batch_output_records: List[Dict], # New parameter to collect outputs
227
+ coherent: bool = False,
228
+ ) -> List[str]:
229
+
230
+ user_prompts = [
231
+ get_webpage_to_reasonchain_instruction(r, sq, doc)
232
+ for r, sq, doc in zip(prev_reasonings, search_queries, documents)
233
+ ]
234
+
235
+ logging.info(f"generate_webpage_to_reasonchain_batch, len(user_prompts): {len(user_prompts)}")
236
+ prompts = [{"role": "user", "content": up} for up in user_prompts]
237
+ logging.info("webpage ana prompts[0]")
238
+ logging.info(prompts[0])
239
+
240
+ raw_outputs = []
241
+
242
+ for prompt in prompts:
243
+ raw_output = webpage_analysis_single(summ_model_url, summ_model_path, prompt)
244
+ raw_outputs.append(raw_output)
245
+ # webpage_analysis_single_to_map = partial(webpage_analysis_single, summ_model_url, summ_model_path)
246
+ # with multiprocessing.Pool(processes=10) as pool:
247
+ # raw_outputs = list(tqdm(pool.imap(webpage_analysis_single_to_map, prompts), total=len(prompts), desc="generate webpage analyses"))
248
+
249
+
250
+ # Count the number of summarization errors
251
+ sum_error = 0
252
+ for output in raw_outputs:
253
+ if output is None or output == "None" or output == "":
254
+ sum_error += 1
255
+ logging.info(f"summarization_error: {sum_error}, ratios: {sum_error / len(raw_outputs)}")
256
+
257
+ extracted_infos = [extract_answer(raw, mode='infogen') for raw in raw_outputs]
258
+
259
+ for i, (p, r, e) in enumerate(zip(prompts, raw_outputs, extracted_infos)):
260
+ batch_output_records.append({
261
+ 'prompt': p,
262
+ 'raw_output': r,
263
+ 'extracted_info': e
264
+ })
265
+
266
+
267
+
268
+ return extracted_infos
269
+
270
+ # ---------------------- Preparation of Input Prompts ----------------------
271
+ input_list = []
272
+ for item in filtered_data:
273
+ question = item['Question']
274
+
275
+ if dataset_name in ['aime']:
276
+ instruction = get_multiqa_instruction(MAX_SEARCH_LIMIT)
277
+ user_prompt = get_task_instruction_math(question)
278
+
279
+ else:
280
+ instruction = get_multiqa_instruction(MAX_SEARCH_LIMIT)
281
+ user_prompt = get_task_instruction_openqa(question)
282
+
283
+
284
+ # prompt = [{"role": "user", "content": instruction + user_prompt}]
285
+ prompt = instruction + user_prompt
286
+ prompt = tokenize_prompt(history, prompt, model_config['model_path'], multi_turn=True)
287
+ # prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
288
+ input_list.append(prompt)
289
+ logging.info(f"Input prompt for question '{question}':\n {prompt}")
290
+
291
+ # Initialize active sequences
292
+ active_sequences = [{
293
+ 'item': item,
294
+ 'prompt': prompt,
295
+ 'output': '',
296
+ 'finished': False,
297
+ 'history': [],
298
+ 'search_count': 0,
299
+ 'executed_search_queries': set(),
300
+ 'all_info': [],
301
+ } for item, prompt in zip(filtered_data, input_list)]
302
+
303
+ async def run_generation(sequences: List[Dict], max_tokens: int, turn: int) -> List:
304
+ prompts = [s['prompt'] for s in sequences]
305
+ headers = {"User-Agent": "Test Client"}
306
+ payload = {
307
+ "prompt": prompts[0],
308
+ "max_tokens": max_tokens,
309
+ "temperature": 0.6,
310
+ "top_p": 0.95,
311
+ "top_k": 40,
312
+ "stop":[END_SEARCH_QUERY, "<|im_end|>"],
313
+ "include_stop_str_in_output": True,
314
+ "stream": True,
315
+ }
316
+ payload.update(kwargs)
317
+ logging.debug(f"Sending request to model: {model_config['model_name']}")
318
+ logging.debug(f"Request payload: {payload}")
319
+ client = httpx.AsyncClient()
320
+ async with client.stream(
321
+ "POST", model_config["api_url"], headers=headers, json=payload
322
+ ) as response:
323
+ output_final = ""
324
+
325
+ async for chunk in response.aiter_bytes(): # 1,2,3
326
+ if chunk:
327
+ try:
328
+ data = json.loads(chunk.decode("utf-8").rstrip("\x00"))
329
+ output = data["text"][0][len(prompts[0]) :]
330
+ # logging.info(f"output11111: {output}")
331
+ # widget_output += output
332
+ if widget_output == "":
333
+ widget.markdown(f"**✨ Reasoning {turn}:** " + output, unsafe_allow_html=True)
334
+ else:
335
+ widget.markdown(widget_output + f"\n**✨ Reasoning {turn}:** " + output, unsafe_allow_html=True)
336
+ output_final = output
337
+ # output_buffer += output
338
+ except json.JSONDecodeError as e:
339
+ # logging.error(f"Attention!: JSON decode error: {e}, chunk: {chunk}")
340
+ continue
341
+
342
+ return [output_final]
343
+
344
+
345
+
346
+ # Function to extract text between two tags
347
+ def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
348
+ pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
349
+ matches = re.findall(pattern, text, flags=re.DOTALL)
350
+ if matches:
351
+ return matches[-1].strip()
352
+ return None
353
+
354
+ def replace_recent_steps(origin_str, replace_str):
355
+ """
356
+ Replaces specific steps in the original reasoning steps with new steps.
357
+ If a replacement step contains "DELETE THIS STEP", that step is removed.
358
+
359
+ Parameters:
360
+ - origin_str (str): The original reasoning steps.
361
+ - replace_str (str): The steps to replace or delete.
362
+
363
+ Returns:
364
+ - str: The updated reasoning steps after applying replacements.
365
+ """
366
+
367
+ def parse_steps(text):
368
+ """
369
+ Parses the reasoning steps from a given text.
370
+
371
+ Parameters:
372
+ - text (str): The text containing reasoning steps.
373
+
374
+ Returns:
375
+ - dict: A dictionary mapping step numbers to their content.
376
+ """
377
+ step_pattern = re.compile(r"Step\s+(\d+):\s*")
378
+ steps = {}
379
+ current_step_num = None
380
+ current_content = []
381
+
382
+ for line in text.splitlines():
383
+ step_match = step_pattern.match(line)
384
+ if step_match:
385
+ # If there's an ongoing step, save its content
386
+ if current_step_num is not None:
387
+ steps[current_step_num] = "\n".join(current_content).strip()
388
+ current_step_num = int(step_match.group(1))
389
+ content = line[step_match.end():].strip()
390
+ current_content = [content] if content else []
391
+ else:
392
+ if current_step_num is not None:
393
+ current_content.append(line)
394
+
395
+ # Save the last step if any
396
+ if current_step_num is not None:
397
+ steps[current_step_num] = "\n".join(current_content).strip()
398
+
399
+ return steps
400
+
401
+ # Parse the original and replacement steps
402
+ origin_steps = parse_steps(origin_str)
403
+ replace_steps = parse_steps(replace_str)
404
+
405
+ # Apply replacements
406
+ for step_num, content in replace_steps.items():
407
+ if "DELETE THIS STEP" in content:
408
+ # Remove the step if it exists
409
+ if step_num in origin_steps:
410
+ del origin_steps[step_num]
411
+ else:
412
+ # Replace or add the step
413
+ origin_steps[step_num] = content
414
+
415
+ # Sort the steps by step number
416
+ sorted_steps = sorted(origin_steps.items())
417
+
418
+ # Reconstruct the reasoning steps as a single string
419
+ new_reasoning_steps = "\n\n".join([f"{content}" for num, content in sorted_steps])
420
+
421
+ return new_reasoning_steps
422
+
423
+
424
+ # ---------------------- Initialize Collection Structure ----------------------
425
+ # Initialize a list to collect batch outputs
426
+ batch_output_records = []
427
+
428
+ start_time = time.time()
429
+ turn = 0
430
+
431
+ # Main loop until all sequences are finished or maximum turns reached
432
+ while True:
433
+ # Identify sequences that need generation
434
+ sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']]
435
+
436
+ if sequences_needing_generation:
437
+ turn += 1
438
+ logging.info(f'\n-------------- Turn {turn} --------------')
439
+ logging.info(f"We have {len(sequences_needing_generation)} sequences needing generation...")
440
+ outputs = await run_generation(sequences_needing_generation, max_tokens, turn)
441
+ if widget_output == "":
442
+ widget_output += f"**✨ Reasoning {turn}:** " + outputs[0]
443
+ else:
444
+ widget_output += f"\n**✨ Reasoning {turn}:** " + outputs[0] # Append the first output to widget_output
445
+ widget.markdown(widget_output, unsafe_allow_html=True)
446
+ logging.info("Generation completed, processing outputs...")
447
+
448
+ # Initialize batch variables
449
+ batch_relevant_info = []
450
+ batch_original_questions = []
451
+ batch_prev_reasonings = []
452
+ batch_search_queries = []
453
+ batch_documents = []
454
+ batch_sequences = []
455
+
456
+ # Collect URLs to fetch across all sequences
457
+ all_urls_to_fetch = set()
458
+ url_snippets = {}
459
+ url_sequence_map = {} # Map URL to list of sequences needing it
460
+
461
+ start_search_time = time.time()
462
+ # Process each sequence and collect URLs
463
+ for seq, out in zip(sequences_needing_generation, outputs):
464
+ text = out
465
+ seq['history'].append(text)
466
+ # Append generated text to prompt and output
467
+ seq['prompt'] += text
468
+ seq['output'] += text
469
+ seq['all_info'].append({f"turn_{turn}_reason": text})
470
+ # Extract search query
471
+ search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
472
+
473
+ # If a search query is present and needs to be executed
474
+ if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
475
+ if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
476
+ # Execute search, use cache if available
477
+ if search_query in search_cache:
478
+ results = search_cache[search_query]
479
+ logging.info(f"Using cached search results for query: \"{search_query}\"")
480
+ else:
481
+ try:
482
+ if is_exclude_urls and "urls" in seq["item"]["metadata"]:
483
+ logging.info(f"is_exclude_urls: {is_exclude_urls}")
484
+ exclude_urls = seq["item"]["metadata"]["urls"]
485
+ else:
486
+ exclude_urls = []
487
+
488
+ logging.info(f"Execute and cache search for query: \"{search_query}\"")
489
+
490
+ # 更新输出
491
+ widget_output += f"\n\n**🔍 Executing search for query:** {search_query}\n"
492
+ widget.markdown(widget_output, unsafe_allow_html=True)
493
+
494
+ results = google_web_search(search_query, google_subscription_key, google_endpoint, market='en-US', language='en', exclude_urls=exclude_urls) # 执行搜索
495
+ search_cache[search_query] = results
496
+ logging.info(f"Executed and cached search for query: \"{search_query}\"")
497
+ except Exception as e:
498
+ logging.info(f"Error during search query '{search_query}': {e}")
499
+ search_cache[search_query] = {}
500
+ results = {}
501
+
502
+ # Extract relevant information from Bing search results
503
+ relevant_info = extract_relevant_info(results)[:top_k]
504
+ seq['relevant_info'] = relevant_info
505
+
506
+ # Extract URLs and snippets
507
+ urls_to_fetch = [it['url'] for it in relevant_info]
508
+
509
+ ## 更新网页
510
+ widget_output += "\n\n" + generate_favicon_html(urls_to_fetch) + "\n"
511
+ widget.markdown(widget_output, unsafe_allow_html=True)
512
+
513
+ snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info}
514
+
515
+ # Filter URLs that are not cached
516
+ urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache]
517
+ cached_urls = [u for u in urls_to_fetch if u in url_cache]
518
+
519
+ # Store info for all_urls_to_fetch and url_snippets
520
+ for url in urls_to_fetch_filtered:
521
+ all_urls_to_fetch.add(url)
522
+ url_snippets[url] = snippets.get(url, "")
523
+
524
+ all_reasoning_steps = seq['output']
525
+ all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n")
526
+
527
+ truncated_prev_reasoning = ""
528
+ for i, step in enumerate(all_reasoning_steps):
529
+ truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"
530
+
531
+ prev_steps = truncated_prev_reasoning.split('\n\n')
532
+ if len(prev_steps) <= 5:
533
+ truncated_prev_reasoning = '\n\n'.join(prev_steps)
534
+ else:
535
+ truncated_prev_reasoning = ''
536
+ for i, step in enumerate(prev_steps):
537
+ if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
538
+ truncated_prev_reasoning += step + '\n\n'
539
+ else:
540
+ if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
541
+ truncated_prev_reasoning += '...\n\n'
542
+ truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
543
+
544
+ # Collect parameters for batch processing
545
+ batch_relevant_info.append(relevant_info)
546
+ batch_original_questions.append(seq['item']['Question'])
547
+ batch_prev_reasonings.append(truncated_prev_reasoning)
548
+ batch_search_queries.append(search_query)
549
+ batch_sequences.append(seq)
550
+
551
+ # Update search count and executed queries
552
+ seq['search_count'] += 1
553
+ seq['executed_search_queries'].add(search_query)
554
+
555
+ elif seq['search_count'] >= MAX_SEARCH_LIMIT:
556
+ limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
557
+ seq['prompt'] += limit_message
558
+ seq['output'] += limit_message
559
+ seq['history'].append(limit_message)
560
+ seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
561
+ logging.info(f"Search limit reached for query: \"{search_query}\"")
562
+
563
+ elif search_query in seq['executed_search_queries']:
564
+ limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
565
+ seq['prompt'] += limit_message
566
+ seq['output'] += limit_message
567
+ seq['history'].append(limit_message)
568
+ seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
569
+ logging.info(f"Repeated search for query: \"{search_query}\"")
570
+
571
+
572
+ else:
573
+ # If no search query needs to be executed, mark the sequence as finished
574
+ seq['finished'] = True
575
+ logging.info("Sequence marked as complete.")
576
+
577
+ logging.info(f"get search time taken: {time.time() - start_search_time}")
578
+ logging.info(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
579
+ logging.info(f"all_urls_to_fetch: {all_urls_to_fetch}")
580
+ # Batch fetch all URLs at once to optimize speed
581
+
582
+ if all_urls_to_fetch:
583
+ logging.info(f"Fetching {len(all_urls_to_fetch)} URLs...")
584
+ try:
585
+ fetched_contents = fetch_page_content(
586
+ list(all_urls_to_fetch),
587
+ use_jina=False,
588
+ jina_api_key=None,
589
+ # snippets=url_snippets # Do not pass snippets when updating url_cache directly
590
+ )
591
+ logging.info(f"Fetched {len(fetched_contents)} URLs successfully.")
592
+ except Exception as e:
593
+ logging.info(f"Error during batch URL fetching: {e}")
594
+ fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
595
+ # Update cache with fetched contents
596
+ for url, content in fetched_contents.items():
597
+ url_cache[url] = content
598
+
599
+ # After fetching, prepare formatted documents for batch processing
600
+ for relevant_info in batch_relevant_info:
601
+ formatted_documents = ""
602
+ for i, doc_info in enumerate(relevant_info):
603
+ url = doc_info['url']
604
+ raw_context = url_cache.get(url, "")
605
+ doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
606
+ success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
607
+ if success:
608
+ logging.info("extract_snippet_with_context")
609
+ context = filtered_context
610
+ else:
611
+ logging.info(f"use raw_webpage_context, {len(raw_context)}")
612
+ context = raw_context[:max_doc_len*2]
613
+
614
+ doc_info['context'] = context
615
+ formatted_documents += f"**Web Page {i + 1}:**\n"
616
+ formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
617
+ logging.info(f'formatted_webpage_documents: {len(formatted_documents)}')
618
+ batch_documents.append(formatted_documents)
619
+
620
+ # After fetching, prepare for batch processing if there are any
621
+ if batch_sequences:
622
+ logging.info(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
623
+ webpage_analyses = generate_webpage_to_reasonchain_batch(
624
+ original_questions=batch_original_questions,
625
+ prev_reasonings=batch_prev_reasonings,
626
+ search_queries=batch_search_queries,
627
+ documents=batch_documents,
628
+ summ_model_url=summ_model_url,
629
+ summ_model_path=summ_model_path,
630
+ batch_output_records=batch_output_records, # Pass the collection list
631
+ )
632
+ logging.info("Batch generation completed, assigning outputs to sequences...")
633
+
634
+ for seq, analysis,doc in zip(batch_sequences, webpage_analyses, batch_documents):
635
+ ## 更新输出
636
+ widget_output += f"\n**📄Web Pages Summary Results:**\n{analysis}\n"
637
+ widget.markdown(widget_output, unsafe_allow_html=True)
638
+ if isinstance(analysis, str):
639
+ append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n"
640
+ seq['prompt'] += append_text
641
+ seq['output'] += append_text
642
+ seq['history'].append(append_text)
643
+ seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
644
+ else:
645
+ append_text = replace_recent_steps(seq['output'], analysis)
646
+ seq['prompt'] += append_text
647
+ seq['output'] += append_text
648
+ seq['history'].append(append_text)
649
+ seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
650
+
651
+ # Check if all sequences are finished
652
+ active_sequences_part = [{
653
+ 'item': ele["item"],
654
+ 'prompt': ele['prompt'],
655
+ 'output': ele["output"],
656
+ 'finished': ele["finished"],
657
+ 'history':ele["history"],
658
+ 'search_count': ele["search_count"],
659
+ 'all_info': ele['all_info']
660
+ } for ele in active_sequences]
661
+ # with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
662
+ # json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
663
+ unfinished = [seq for seq in active_sequences if not seq['finished']]
664
+ if not unfinished:
665
+ break
666
+ else:
667
+ if turn >= MAX_TURN:
668
+ logging.info(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
669
+ break
670
+
671
+ total_time = time.time() - start_time
672
+ logging.info(f"Total time taken: {total_time} seconds")
673
+
674
+ # # ---------------------- Save Batch Output Records to JSON File ----------------------
675
+ # # Define output JSON file path
676
+ # t = time.localtime()
677
+ # batch_output_file = os.path.join(output_dir, f'test.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')
678
+
679
+ # # Save batch_output_records to JSON file
680
+ # with open(batch_output_file, 'w', encoding='utf-8') as f:
681
+ # json.dump(batch_output_records, f, ensure_ascii=False, indent=2)
682
+
683
+ # logging.info(f"Batch outputs saved to {batch_output_file}")
684
+
685
+ # Prepare output list for evaluation
686
+ output_list = [seq['output'] for seq in active_sequences]
687
+
688
+ response_dict["text"] = widget_output# Update the response dictionary with the first output
689
+
690
+ # # Run evaluation
691
+ # if dataset_name in ["eval", "gaia"]:
692
+ # run_evaluation_for_eval(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, 'test')
693
+ # else:
694
+ # run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, 'test')
695
+
696
+ # # ---------------------- Stage-wise Analysis ----------------------
697
+ # turn_files = os.listdir(output_dir)
698
+ # turn_files = [file for file in turn_files if file.startswith("turn_")]
699
+ # max_turn_file = max(turn_files, key=lambda x: int(re.search(r'turn_(\d+)', x).group(1)))
700
+
701
+ # max_turn_file_path = os.path.join(output_dir, max_turn_file)
702
+ # logging.info(f"max_turn_file_path: {max_turn_file_path}")
703
+ # stage_wise_analysis(model_path, max_turn_file_path)
704
+
705
+ # ---------------------- Update Search and URL Cache ----------------------
706
+ logging.info('Updating Search and URL Cache...')
707
+ # Load existing caches or initialize empty dictionaries
708
+ if os.path.exists(search_cache_path):
709
+ try:
710
+ with open(search_cache_path, 'r', encoding='utf-8') as f:
711
+ search_cache_new = json.load(f)
712
+ except Exception as e:
713
+ logging.info(f"Error loading search cache: {e}")
714
+ search_cache_new = {}
715
+ else:
716
+ search_cache_new = {}
717
+
718
+ if os.path.exists(url_cache_path):
719
+ try:
720
+ with open(url_cache_path, 'r', encoding='utf-8') as f:
721
+ url_cache_new = json.load(f)
722
+ except Exception as e:
723
+ logging.info(f"Error loading url cache: {e}")
724
+ url_cache_new = {}
725
+ else:
726
+ url_cache_new = {}
727
+
728
+ search_cache.update(search_cache_new)
729
+ url_cache.update(url_cache_new)
730
+
731
+ save_caches()
732
+
733
+ logging.info("Process completed.")
734
+
735
def parse_arguments():
    """Parse command-line arguments for the demo client.

    Returns:
        tuple: (argparse.Namespace, list) — the parsed known arguments and
        any unrecognized extra arguments (tolerated and ignored).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config",
        "-c",
        type=str,
        default="./demo_client_basic.json",
        help="path to configs",
    )
    parser.add_argument(
        "--user_avatar",
        "-a",
        type=str,
        default="./user_demo.png",
        help="path to user avatar",
    )
    parser.add_argument(
        "--log",
        "-l",
        type=str,
        # Plain literal: the original f-string prefix had no placeholders.
        default="./chatweb.log",
        help="path to logs",
    )
    return parser.parse_known_args()
759
+
760
+
761
def load_model_configs(config_path):
    """Read the model configuration list from a JSON file.

    Args:
        config_path (str): Path to the JSON configuration file.

    Returns:
        The deserialized JSON content (typically a list of model dicts).
    """
    with open(config_path, "r") as fp:
        return json.load(fp)
764
+
765
+
766
def setup_sidebar(model_config_list):
    """Render sidebar controls for per-model generation parameters.

    One expander is drawn per model.  Non-dict parameter values pass through
    unchanged; dict-valued entries describe a Streamlit input component to
    render (``component`` + ``kwargs``).  A per-model toggle controls
    whether the model participates in generation.

    Args:
        model_config_list (list): Model configuration dicts.

    Returns:
        tuple: (kwargs_list, visibility_list) — generation kwargs dict and
        enabled flag for each model, in input order.
    """
    all_kwargs = []
    all_visible = []
    with st.sidebar:
        st.header("Generate Parameters")
        for cfg in model_config_list:
            name = cfg["model_name"]
            with st.expander(f'{name}'):
                gen_kwargs = {}
                for param, spec in cfg["generate_parameter"].items():
                    if isinstance(spec, dict):
                        # Dict specs name a Streamlit widget and its kwargs.
                        widget_fn = getattr(st, spec["component"])
                        gen_kwargs[param] = widget_fn(
                            param,
                            key=f'{name}-{param}',
                            **spec["kwargs"],
                        )
                    else:
                        # Fixed values are used verbatim.
                        gen_kwargs[param] = spec
                all_kwargs.append(gen_kwargs)
                all_visible.append(
                    st.toggle(
                        "启用",
                        value=cfg.get("visibility", True),
                        key=f'{name}-visibility',
                    )
                )
    return all_kwargs, all_visible
801
+
802
+
803
def initialize_session_state():
    """Ensure the Streamlit session-state chat history list exists."""
    if "chat" not in st.session_state:
        st.session_state["chat"] = []
806
+
807
+
808
def display_chat_history(chat_container):
    """Re-render every stored chat turn inside *chat_container*."""
    with chat_container:
        for chat in st.session_state["chat"]:
            display_chat(chat)
812
+
813
def handle_user_input(user_input, args, chat_container, model_config_list, kwargs_list, visibility_list):
    """Handle one user turn: render the question, query every enabled model
    concurrently, and append the finished turn to the session chat history.

    Args:
        user_input (str): Text the user typed; a falsy value is a no-op.
        args (argparse.Namespace): Parsed CLI arguments (avatar path).
        chat_container (streamlit.container): Container the chat renders into.
        model_config_list (list): Model configuration dicts.
        kwargs_list (list): Per-model generation kwargs from the sidebar.
        visibility_list (list): Per-model enabled flags from the sidebar.
    """
    if not user_input:
        return

    logging.info(f"User input received: {user_input}")
    chat = {"input": user_input, "avatar": args.user_avatar, "response": []}
    with chat_container:
        with st.chat_message("user", avatar=args.user_avatar):
            st.markdown(user_input, unsafe_allow_html=True)
        # One column per *visible* model.
        column_list = st.columns(sum(visibility_list))

        loop = asyncio.new_event_loop()
        try:
            waiting_list = []
            model_idx = 0
            for model_config, kwargs, visibility in zip(
                model_config_list, kwargs_list, visibility_list
            ):
                if not visibility:
                    continue
                column = column_list[model_idx]
                with column:
                    st.header(model_config["model_name"])
                    with st.chat_message("assistant", avatar=model_config["avatar"]):
                        widget = st.empty()
                        # NOTE(review): history indexes prior responses by
                        # model_idx, which assumes the set of visible models is
                        # stable across turns — confirm against setup_sidebar.
                        history = []
                        for history_chat in st.session_state["chat"]:
                            history.append(
                                (
                                    history_chat["input"],
                                    history_chat["response"][model_idx]["text"],
                                )
                            )
                        chat["response"].append(
                            {
                                "model": model_config["model_name"],
                                "text": "",
                                "avatar": model_config["avatar"],
                            }
                        )
                        waiting_list.append(
                            fetch_model_response(
                                widget,
                                history,
                                user_input,
                                model_config,
                                chat["response"][-1],
                                **kwargs,
                            )
                        )
                model_idx += 1

            # asyncio.wait() no longer accepts bare coroutines (Python 3.11+);
            # gather() schedules them and surfaces exceptions instead of
            # silently collecting them.
            loop.run_until_complete(asyncio.gather(*waiting_list))
        finally:
            # The original leaked one event loop per user turn.
            loop.close()
        st.session_state["chat"].append(chat)

    # log chat response
    for idx, chat_response in enumerate(chat["response"]):
        logging.info(f"Model: {chat_response['model']}, Response: {chat_response['text']}")
875
+
876
+
877
+
878
+
879
def main():
    """Application entry point: parse CLI args, configure logging, load model
    configs, and render the Streamlit chat UI for one script run."""
    args, _ = parse_arguments()
    setup_logging(args.log)  # initialize logging configuration

    logging.debug("Starting application")
    model_config_list = load_model_configs(args.config)

    st.title(TITLE)
    initialize_session_state()

    # Sidebar returns per-model generation kwargs and enabled flags.
    kwargs_list, visibility_list = setup_sidebar(model_config_list)

    chat_container = st.container()
    display_chat_history(chat_container)

    # Returns the submitted text, or None on a fresh render.
    user_input = st.chat_input("")
    handle_user_input(user_input, args, chat_container, model_config_list, kwargs_list, visibility_list)


if __name__ == "__main__":
    main()
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/SimpleDeepSearcher_demo_1.py ADDED
@@ -0,0 +1,1043 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # === 标准库导入(Python 内置模块) ===
2
+ import argparse
3
+ import asyncio
4
+ import json
5
+ import logging
6
+ import multiprocessing
7
+ import os
8
+ import re
9
+ import string
10
+ import time
11
+ from functools import partial
12
+ from logging.handlers import RotatingFileHandler
13
+ from typing import Optional, Tuple, List, Dict
14
+ import hashlib
15
+
16
+ # === 第三方库 ===
17
+ import httpx
18
+ import numpy as np
19
+ import streamlit as st
20
+ import torch
21
+ from tqdm import tqdm
22
+ from urllib.parse import urlparse
23
+ # === 第三方 Transformers & vLLM ===
24
+ from transformers import AutoTokenizer
25
+ from vllm import LLM, SamplingParams
26
+ from google_search import (
27
+ google_web_search,
28
+ extract_relevant_info,
29
+ fetch_page_content,
30
+ extract_snippet_with_context
31
+ )
32
+ # from evaluate import (
33
+ # run_evaluation,
34
+ # run_evaluation_for_eval,
35
+ # extract_answer
36
+ # )
37
+ from prompts import (
38
+ get_multiqa_instruction,
39
+ get_math_instruction,
40
+ get_task_instruction_openqa,
41
+ get_task_instruction_math,
42
+ get_webpage_to_reasonchain_instruction
43
+ )
44
+ from openai import OpenAI
45
+
46
+ from stage_wise_analysis import stage_wise_analysis
47
+
48
+ # Define special tokens
49
+ BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
50
+ END_SEARCH_QUERY = "<|end_search_query|>"
51
+ BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
52
+ END_SEARCH_RESULT = "<|end_search_result|>"
53
+
54
+
55
+
56
+ # 应用标题
57
+ TITLE = "与深度搜索模型对话"
58
+
59
+ # 设置 Streamlit 页面配置
60
+ st.set_page_config(page_title=TITLE, layout="wide", initial_sidebar_state="collapsed")
61
+
62
+
63
+ # --- Authentication Helper Functions ---
64
+ USERS_FILE = "users.json"
65
+
66
def hash_password(password):
    """Return the hex SHA-256 digest of *password* (UTF-8 encoded)."""
    digest = hashlib.sha256(password.encode())
    return digest.hexdigest()
69
+
70
def load_users():
    """Load the registered-users mapping from USERS_FILE.

    Returns an empty dict when the file is missing or holds invalid JSON.
    """
    if not os.path.exists(USERS_FILE):
        return {}
    with open(USERS_FILE, 'r') as fp:
        try:
            return json.load(fp)
        except json.JSONDecodeError:
            return {}
79
+
80
def save_users(users_data):
    """Write *users_data* to USERS_FILE as indented JSON (overwrites)."""
    with open(USERS_FILE, 'w') as fp:
        json.dump(users_data, fp, indent=4)
84
+
85
def verify_user(username, password):
    """Return True iff *username* is registered and *password* hashes to the
    stored credential."""
    record = load_users().get(username)
    if record is None:
        return False
    return record['password'] == hash_password(password)
93
+
94
def register_user(username, password):
    """Register a new user in USERS_FILE.

    Returns:
        tuple: (success flag, human-readable message).
    """
    users = load_users()
    if username in users:
        return False, "Username already exists."
    if not (username and password):
        return False, "Username and password cannot be empty."
    # Add more password complexity rules if needed
    users[username] = {'password': hash_password(password)}
    save_users(users)
    return True, "Registration successful! Please login."
105
+
106
+ # --- End Authentication Helper Functions ---
107
def tokenize_prompt(history, query, model_path, multi_turn=False):
    """Build a chat-template prompt string for *query*.

    When *multi_turn* is true, prior (question, reply) pairs from *history*
    are included as alternating user/assistant messages before the query.
    """
    messages = []
    if multi_turn:
        for question, reply in history:
            messages.append({"role": "user", "content": question})
            messages.append({"role": "assistant", "content": reply})
    messages.append({"role": "user", "content": query})

    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=False
    )
119
+
120
+ # 配置日志
121
def setup_logging(log_file):
    """Configure root logging at INFO level: a rotating file (5 MB x 5
    backups) plus console output."""
    file_handler = RotatingFileHandler(
        log_file, maxBytes=1024 * 1024 * 5, backupCount=5
    )
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[file_handler, logging.StreamHandler()],
    )
130
+
131
+
132
def display_chat(chat):
    """Render one stored chat turn: the user message followed by one column
    per model response."""
    with st.chat_message("user", avatar=chat["avatar"]):
        st.markdown(chat["input"], unsafe_allow_html=True)

    responses = chat["response"]
    for col, resp in zip(st.columns(len(responses)), responses):
        with col:
            st.header(resp["model"])
            with st.chat_message("assistant", avatar=resp["avatar"]):
                st.markdown(resp["text"], unsafe_allow_html=True)
141
+
142
def extract_answer(output, mode='gen'):
    """Pull the final answer out of a model generation.

    Modes:
        'codegen' — payload of the last ```python fenced block;
        'infogen' — text after the **Final Information** (newlines removed)
                    or **Modified Reasoning Steps** marker, falling back to
                    the whole output when neither marker is present;
        'gen'/'choose'/'qa' — payload of the last \\boxed{...}; for
                    choose/qa an inner \\text{...} is unwrapped and
                    surrounding parentheses are stripped.

    Returns '' when nothing matches (except 'infogen', which echoes input).
    """
    if output is None:
        output = "None"

    if mode == 'codegen':
        code_blocks = re.findall(r'```python\s*(.*?)\s*```', output,
                                 re.DOTALL | re.IGNORECASE)
        return code_blocks[-1].strip() if code_blocks else ''

    if mode == 'infogen':
        # Reasoning extracted from webpage-analysis generations.
        for marker in ("**Final Information**", "**Modified Reasoning Steps**"):
            if marker in output:
                tail = output.split(marker)[-1]
                if marker == "**Final Information**":
                    tail = tail.replace("\n", "")
                return tail.strip("```").strip()
        return output

    # 'gen', 'choose', 'qa': take the payload of the last \boxed{...}.
    answer = ''
    boxed = re.findall(r'\\boxed\{(.*)\}', output)
    if boxed:
        answer = boxed[-1]
    if mode in ['choose', 'qa']:
        wrapped = re.findall(r'\\text\{(.*)\}', answer)
        if wrapped:
            answer = wrapped[-1]
        answer = answer.strip("()")
    return answer
179
+
180
+ # def generate_favicon_html(urls):
181
+ # favicons = []
182
+ # for url in urls:
183
+ # domain = urlparse(url).netloc
184
+ # favicon_url = f"https://{domain}/favicon.ico"
185
+ # favicons.append(f'<img src="{favicon_url}" width="16" style="margin-right:4px;">')
186
+ # return "**🌐 Fetching web pages:** " + "".join(favicons)
187
+
188
def generate_favicon_html(urls):
    """Build an HTML snippet showing a favicon for each URL.

    Icons come from Google's S2 favicon service (sz=16 → 16x16 px); an icon
    that fails to load hides itself via the onerror handler.
    """
    icon_tags = []
    for link in urls:
        host = urlparse(link).netloc
        icon_src = f"https://www.google.com/s2/favicons?domain={host}&sz=16"
        icon_tags.append(
            f'<img src="{icon_src}" width="16" style="margin-right:4px;" alt=" " onerror="this.style.display=\'none\'">'
        )
    return "**🌐 Fetching web pages:** " + "".join(icon_tags)
197
+
198
def webpage_analysis_single(summ_model_url, summ_model_path, prompt) -> str:
    """Summarize one webpage-analysis prompt via an OpenAI-compatible endpoint.

    Retries up to 10 times (sleeping 1 s between attempts) on any error;
    returns the literal string "None" if every attempt fails.
    """
    client = OpenAI(
        base_url=summ_model_url,
        api_key="EMPTY"
    )
    attempts_left = 10
    while attempts_left > 0:
        attempts_left -= 1
        try:
            reply = client.chat.completions.create(
                model=summ_model_path,
                max_tokens=8192,
                temperature=0.6,
                top_p=0.95,
                messages=[prompt],
            )
        except Exception as err:
            logging.info(err)
            time.sleep(1)
            continue
        return reply.choices[0].message.content
    return "None"
218
+
219
+
220
+
221
+ async def fetch_model_response(widget, history, question, model_config, response_dict, **kwargs):
222
+ filtered_data = [{"Question": question, "Answer": ""}]
223
+
224
+ # temperature = args.temperature
225
+ # top_p = args.top_p
226
+ # top_k_sampling = args.top_k_sampling
227
+ # max_tokens = args.max_tokens
228
+
229
+
230
+ dataset_name = "user_queries"
231
+ subset_num = -1
232
+ MAX_SEARCH_LIMIT = 10
233
+ MAX_TURN = 15
234
+ top_k = 10
235
+ max_doc_len = 3000
236
+ summ_model_path = "/capacity/userdata/models/QwQ-32B"
237
+ summ_model_url = "http://localhost:8003/v1"
238
+ google_subscription_key = "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d"
239
+ google_endpoint = "https://google.serper.dev/search"
240
+ cache_dir = "../cache"
241
+ output_dir = "output"
242
+ is_exclude_urls = False
243
+ max_tokens = 20480
244
+
245
+
246
+ search_cache_path = os.path.join(cache_dir, 'search_cache.json')
247
+ url_cache_path = os.path.join(cache_dir, 'url_cache.json')
248
+ with open(search_cache_path, 'r', encoding='utf-8') as f:
249
+ search_cache = json.load(f)
250
+ with open(url_cache_path, 'r', encoding='utf-8') as f:
251
+ url_cache = json.load(f)
252
+
253
+ widget_output = ""
254
+
255
+
256
+ # Function to save caches
257
+ def save_caches():
258
+ with open(search_cache_path, 'w', encoding='utf-8') as f:
259
+ json.dump(search_cache, f, ensure_ascii=False, indent=2)
260
+ with open(url_cache_path, 'w', encoding='utf-8') as f:
261
+ json.dump(url_cache, f, ensure_ascii=False, indent=2)
262
+
263
+ def generate_webpage_to_reasonchain_batch(
264
+ original_questions: List[str],
265
+ prev_reasonings: List[str],
266
+ search_queries: List[str],
267
+ documents: List[str],
268
+ summ_model_url: OpenAI,
269
+ summ_model_path: str,
270
+ batch_output_records: List[Dict], # New parameter to collect outputs
271
+ coherent: bool = False,
272
+ ) -> List[str]:
273
+
274
+ user_prompts = [
275
+ get_webpage_to_reasonchain_instruction(r, sq, doc)
276
+ for r, sq, doc in zip(prev_reasonings, search_queries, documents)
277
+ ]
278
+
279
+ logging.info(f"generate_webpage_to_reasonchain_batch, len(user_prompts): {len(user_prompts)}")
280
+ prompts = [{"role": "user", "content": up} for up in user_prompts]
281
+ logging.info("webpage ana prompts[0]")
282
+ logging.info(prompts[0])
283
+
284
+ raw_outputs = []
285
+
286
+ for prompt in prompts:
287
+ raw_output = webpage_analysis_single(summ_model_url, summ_model_path, prompt)
288
+ raw_outputs.append(raw_output)
289
+ # webpage_analysis_single_to_map = partial(webpage_analysis_single, summ_model_url, summ_model_path)
290
+ # with multiprocessing.Pool(processes=10) as pool:
291
+ # raw_outputs = list(tqdm(pool.imap(webpage_analysis_single_to_map, prompts), total=len(prompts), desc="generate webpage analyses"))
292
+
293
+
294
+ # Count the number of summarization errors
295
+ sum_error = 0
296
+ for output in raw_outputs:
297
+ if output is None or output == "None" or output == "":
298
+ sum_error += 1
299
+ logging.info(f"summarization_error: {sum_error}, ratios: {sum_error / len(raw_outputs)}")
300
+
301
+ extracted_infos = [extract_answer(raw, mode='infogen') for raw in raw_outputs]
302
+
303
+ for i, (p, r, e) in enumerate(zip(prompts, raw_outputs, extracted_infos)):
304
+ batch_output_records.append({
305
+ 'prompt': p,
306
+ 'raw_output': r,
307
+ 'extracted_info': e
308
+ })
309
+
310
+
311
+
312
+ return extracted_infos
313
+
314
+ # ---------------------- Preparation of Input Prompts ----------------------
315
+ input_list = []
316
+ for item in filtered_data:
317
+ question = item['Question']
318
+
319
+ if dataset_name in ['aime']:
320
+ instruction = get_multiqa_instruction(MAX_SEARCH_LIMIT)
321
+ user_prompt = get_task_instruction_math(question)
322
+
323
+ else:
324
+ instruction = get_multiqa_instruction(MAX_SEARCH_LIMIT)
325
+ user_prompt = get_task_instruction_openqa(question)
326
+
327
+
328
+ # prompt = [{"role": "user", "content": instruction + user_prompt}]
329
+ prompt = instruction + user_prompt
330
+ prompt = tokenize_prompt(history, prompt, model_config['model_path'], multi_turn=True)
331
+ # prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
332
+ input_list.append(prompt)
333
+ logging.info(f"Input prompt for question '{question}':\n {prompt}")
334
+
335
+ # Initialize active sequences
336
+ active_sequences = [{
337
+ 'item': item,
338
+ 'prompt': prompt,
339
+ 'output': '',
340
+ 'finished': False,
341
+ 'history': [],
342
+ 'search_count': 0,
343
+ 'executed_search_queries': set(),
344
+ 'all_info': [],
345
+ } for item, prompt in zip(filtered_data, input_list)]
346
+
347
async def run_generation(sequences: List[Dict], max_tokens: int, turn: int) -> List:
    """Stream one reasoning turn from the generation server.

    Sends the first sequence's prompt to the model API with streaming
    enabled, mirrors each partial completion into the Streamlit widget as
    it arrives, and returns the final streamed text.

    Args:
        sequences: Active sequence dicts; only sequences[0]['prompt'] is sent.
        max_tokens: Generation budget for this turn.
        turn: 1-based turn index, used only in the widget label.

    Returns:
        A single-element list containing the last streamed completion text.
    """
    prompts = [s['prompt'] for s in sequences]
    headers = {"User-Agent": "Test Client"}
    payload = {
        "prompt": prompts[0],
        "max_tokens": max_tokens,
        "temperature": 0.6,
        "top_p": 0.95,
        "top_k": 40,
        "stop": [END_SEARCH_QUERY, "<|im_end|>"],
        "include_stop_str_in_output": True,
        "stream": True,
    }
    payload.update(kwargs)  # user-tuned generation params override the defaults
    logging.debug(f"Sending request to model: {model_config['model_name']}")
    logging.debug(f"Request payload: {payload}")
    output_final = ""
    # Bug fix: the client was created but never closed, leaking the
    # connection pool on every turn; `async with` guarantees cleanup.
    async with httpx.AsyncClient() as client:
        async with client.stream(
            "POST", model_config["api_url"], headers=headers, json=payload
        ) as response:
            async for chunk in response.aiter_bytes():
                if not chunk:
                    continue
                # The server emits NUL-terminated JSON payloads; one network
                # chunk may carry several of them, so split on "\x00" instead
                # of only stripping a trailing NUL (the old code silently
                # dropped every multi-payload chunk via JSONDecodeError).
                for part in chunk.decode("utf-8").split("\x00"):
                    if not part:
                        continue
                    try:
                        data = json.loads(part)
                    except json.JSONDecodeError:
                        # Incomplete fragment split across chunks; skip it.
                        continue
                    # The server echoes prompt + completion; keep the completion.
                    output = data["text"][0][len(prompts[0]):]
                    if widget_output == "":
                        widget.markdown(f"**✨ Reasoning {turn}:** " + output, unsafe_allow_html=True)
                    else:
                        widget.markdown(widget_output + f"\n**✨ Reasoning {turn}:** " + output, unsafe_allow_html=True)
                    output_final = output
    return [output_final]
387
+
388
+
389
+
390
+ # Function to extract text between two tags
391
def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
    """Return the stripped text between the LAST start_tag/end_tag pair.

    Both tags are treated literally (regex-escaped) and the match spans
    newlines. Returns None when no such pair exists in *text*.
    """
    pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
    found = re.findall(pattern, text, flags=re.DOTALL)
    return found[-1].strip() if found else None
397
+
398
def replace_recent_steps(origin_str, replace_str):
    """Merge replacement reasoning steps into the original reasoning steps.

    Steps in *replace_str* override same-numbered steps in *origin_str*;
    a replacement whose content contains "DELETE THIS STEP" removes that
    step entirely.

    Parameters:
    - origin_str (str): The original reasoning steps.
    - replace_str (str): The steps to replace or delete.

    Returns:
    - str: The merged step contents, sorted by step number and joined by
      blank lines (the "Step N:" labels themselves are dropped).
    """
    marker = re.compile(r"Step\s+(\d+):\s*")

    def _collect(text):
        """Map step number -> step content for every 'Step N:' section in *text*."""
        parsed = {}
        num = None
        buf = []
        for raw_line in text.splitlines():
            hit = marker.match(raw_line)
            if hit:
                # Close out the previous step before starting a new one.
                if num is not None:
                    parsed[num] = "\n".join(buf).strip()
                num = int(hit.group(1))
                tail = raw_line[hit.end():].strip()
                buf = [tail] if tail else []
            elif num is not None:
                buf.append(raw_line)
        if num is not None:
            parsed[num] = "\n".join(buf).strip()
        return parsed

    merged = _collect(origin_str)
    for step_num, body in _collect(replace_str).items():
        if "DELETE THIS STEP" in body:
            merged.pop(step_num, None)
        else:
            merged[step_num] = body

    return "\n\n".join(body for _, body in sorted(merged.items()))
466
+
467
+
468
+ # ---------------------- Initialize Collection Structure ----------------------
469
+ # Initialize a list to collect batch outputs
470
+ batch_output_records = []
471
+
472
+ start_time = time.time()
473
+ turn = 0
474
+
475
+ # Main loop until all sequences are finished or maximum turns reached
476
+ while True:
477
+ # Identify sequences that need generation
478
+ sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']]
479
+
480
+ if sequences_needing_generation:
481
+ turn += 1
482
+ logging.info(f'\n-------------- Turn {turn} --------------')
483
+ logging.info(f"We have {len(sequences_needing_generation)} sequences needing generation...")
484
+ outputs = await run_generation(sequences_needing_generation, max_tokens, turn)
485
+ if widget_output == "":
486
+ widget_output += f"**✨ Reasoning {turn}:** " + outputs[0]
487
+ else:
488
+ widget_output += f"\n**✨ Reasoning {turn}:** " + outputs[0] # Append the first output to widget_output
489
+ widget.markdown(widget_output, unsafe_allow_html=True)
490
+ logging.info("Generation completed, processing outputs...")
491
+
492
+ # Initialize batch variables
493
+ batch_relevant_info = []
494
+ batch_original_questions = []
495
+ batch_prev_reasonings = []
496
+ batch_search_queries = []
497
+ batch_documents = []
498
+ batch_sequences = []
499
+
500
+ # Collect URLs to fetch across all sequences
501
+ all_urls_to_fetch = set()
502
+ url_snippets = {}
503
+ url_sequence_map = {} # Map URL to list of sequences needing it
504
+
505
+ start_search_time = time.time()
506
+ # Process each sequence and collect URLs
507
+ for seq, out in zip(sequences_needing_generation, outputs):
508
+ text = out
509
+ seq['history'].append(text)
510
+ # Append generated text to prompt and output
511
+ seq['prompt'] += text
512
+ seq['output'] += text
513
+ seq['all_info'].append({f"turn_{turn}_reason": text})
514
+ # Extract search query
515
+ search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
516
+
517
+ # If a search query is present and needs to be executed
518
+ if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
519
+ if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
520
+ # Execute search, use cache if available
521
+ if search_query in search_cache:
522
+ results = search_cache[search_query]
523
+ logging.info(f"Using cached search results for query: \"{search_query}\"")
524
+ else:
525
+ try:
526
+ if is_exclude_urls and "urls" in seq["item"]["metadata"]:
527
+ logging.info(f"is_exclude_urls: {is_exclude_urls}")
528
+ exclude_urls = seq["item"]["metadata"]["urls"]
529
+ else:
530
+ exclude_urls = []
531
+
532
+ logging.info(f"Execute and cache search for query: \"{search_query}\"")
533
+
534
+ # 更新输出
535
+ widget_output += f"\n\n**🔍 Executing search for query:** {search_query}\n"
536
+ widget.markdown(widget_output, unsafe_allow_html=True)
537
+
538
+ results = google_web_search(search_query, google_subscription_key, google_endpoint, market='en-US', language='en', exclude_urls=exclude_urls) # 执行搜索
539
+ search_cache[search_query] = results
540
+ logging.info(f"Executed and cached search for query: \"{search_query}\"")
541
+ except Exception as e:
542
+ logging.info(f"Error during search query '{search_query}': {e}")
543
+ search_cache[search_query] = {}
544
+ results = {}
545
+
546
+ # Extract relevant information from Bing search results
547
+ relevant_info = extract_relevant_info(results)[:top_k]
548
+ seq['relevant_info'] = relevant_info
549
+
550
+ # Extract URLs and snippets
551
+ urls_to_fetch = [it['url'] for it in relevant_info]
552
+
553
+ ## 更新网页
554
+ widget_output += "\n\n" + generate_favicon_html(urls_to_fetch) + "\n"
555
+ widget.markdown(widget_output, unsafe_allow_html=True)
556
+
557
+ snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info}
558
+
559
+ # Filter URLs that are not cached
560
+ urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache]
561
+ cached_urls = [u for u in urls_to_fetch if u in url_cache]
562
+
563
+ # Store info for all_urls_to_fetch and url_snippets
564
+ for url in urls_to_fetch_filtered:
565
+ all_urls_to_fetch.add(url)
566
+ url_snippets[url] = snippets.get(url, "")
567
+
568
+ all_reasoning_steps = seq['output']
569
+ all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n")
570
+
571
+ truncated_prev_reasoning = ""
572
+ for i, step in enumerate(all_reasoning_steps):
573
+ truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"
574
+
575
+ prev_steps = truncated_prev_reasoning.split('\n\n')
576
+ if len(prev_steps) <= 5:
577
+ truncated_prev_reasoning = '\n\n'.join(prev_steps)
578
+ else:
579
+ truncated_prev_reasoning = ''
580
+ for i, step in enumerate(prev_steps):
581
+ if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
582
+ truncated_prev_reasoning += step + '\n\n'
583
+ else:
584
+ if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
585
+ truncated_prev_reasoning += '...\n\n'
586
+ truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
587
+
588
+ # Collect parameters for batch processing
589
+ batch_relevant_info.append(relevant_info)
590
+ batch_original_questions.append(seq['item']['Question'])
591
+ batch_prev_reasonings.append(truncated_prev_reasoning)
592
+ batch_search_queries.append(search_query)
593
+ batch_sequences.append(seq)
594
+
595
+ # Update search count and executed queries
596
+ seq['search_count'] += 1
597
+ seq['executed_search_queries'].add(search_query)
598
+
599
+ elif seq['search_count'] >= MAX_SEARCH_LIMIT:
600
+ limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
601
+ seq['prompt'] += limit_message
602
+ seq['output'] += limit_message
603
+ seq['history'].append(limit_message)
604
+ seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
605
+ logging.info(f"Search limit reached for query: \"{search_query}\"")
606
+
607
+ elif search_query in seq['executed_search_queries']:
608
+ limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
609
+ seq['prompt'] += limit_message
610
+ seq['output'] += limit_message
611
+ seq['history'].append(limit_message)
612
+ seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
613
+ logging.info(f"Repeated search for query: \"{search_query}\"")
614
+
615
+
616
+ else:
617
+ # If no search query needs to be executed, mark the sequence as finished
618
+ seq['finished'] = True
619
+ logging.info("Sequence marked as complete.")
620
+
621
+ logging.info(f"get search time taken: {time.time() - start_search_time}")
622
+ logging.info(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
623
+ logging.info(f"all_urls_to_fetch: {all_urls_to_fetch}")
624
+ # Batch fetch all URLs at once to optimize speed
625
+
626
+ if all_urls_to_fetch:
627
+ logging.info(f"Fetching {len(all_urls_to_fetch)} URLs...")
628
+ try:
629
+ fetched_contents = fetch_page_content(
630
+ list(all_urls_to_fetch),
631
+ use_jina=False,
632
+ jina_api_key=None,
633
+ # snippets=url_snippets # Do not pass snippets when updating url_cache directly
634
+ )
635
+ logging.info(f"Fetched {len(fetched_contents)} URLs successfully.")
636
+ except Exception as e:
637
+ logging.info(f"Error during batch URL fetching: {e}")
638
+ fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
639
+ # Update cache with fetched contents
640
+ for url, content in fetched_contents.items():
641
+ url_cache[url] = content
642
+
643
+ # After fetching, prepare formatted documents for batch processing
644
+ for relevant_info in batch_relevant_info:
645
+ formatted_documents = ""
646
+ for i, doc_info in enumerate(relevant_info):
647
+ url = doc_info['url']
648
+ raw_context = url_cache.get(url, "")
649
+ doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
650
+ success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
651
+ if success:
652
+ logging.info("extract_snippet_with_context")
653
+ context = filtered_context
654
+ else:
655
+ logging.info(f"use raw_webpage_context, {len(raw_context)}")
656
+ context = raw_context[:max_doc_len*2]
657
+
658
+ doc_info['context'] = context
659
+ formatted_documents += f"**Web Page {i + 1}:**\n"
660
+ formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
661
+ logging.info(f'formatted_webpage_documents: {len(formatted_documents)}')
662
+ batch_documents.append(formatted_documents)
663
+
664
+ # After fetching, prepare for batch processing if there are any
665
+ if batch_sequences:
666
+ logging.info(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
667
+ webpage_analyses = generate_webpage_to_reasonchain_batch(
668
+ original_questions=batch_original_questions,
669
+ prev_reasonings=batch_prev_reasonings,
670
+ search_queries=batch_search_queries,
671
+ documents=batch_documents,
672
+ summ_model_url=summ_model_url,
673
+ summ_model_path=summ_model_path,
674
+ batch_output_records=batch_output_records, # Pass the collection list
675
+ )
676
+ logging.info("Batch generation completed, assigning outputs to sequences...")
677
+
678
+ for seq, analysis,doc in zip(batch_sequences, webpage_analyses, batch_documents):
679
+ ## 更新输出
680
+ widget_output += f"\n**📄Web Pages Summary Results:**\n{analysis}\n"
681
+ widget.markdown(widget_output, unsafe_allow_html=True)
682
+ if isinstance(analysis, str):
683
+ append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n"
684
+ seq['prompt'] += append_text
685
+ seq['output'] += append_text
686
+ seq['history'].append(append_text)
687
+ seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
688
+ else:
689
+ append_text = replace_recent_steps(seq['output'], analysis)
690
+ seq['prompt'] += append_text
691
+ seq['output'] += append_text
692
+ seq['history'].append(append_text)
693
+ seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
694
+
695
+ # Check if all sequences are finished
696
+ active_sequences_part = [{
697
+ 'item': ele["item"],
698
+ 'prompt': ele['prompt'],
699
+ 'output': ele["output"],
700
+ 'finished': ele["finished"],
701
+ 'history':ele["history"],
702
+ 'search_count': ele["search_count"],
703
+ 'all_info': ele['all_info']
704
+ } for ele in active_sequences]
705
+ # with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
706
+ # json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
707
+ unfinished = [seq for seq in active_sequences if not seq['finished']]
708
+ if not unfinished:
709
+ break
710
+ else:
711
+ if turn >= MAX_TURN:
712
+ logging.info(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
713
+ break
714
+
715
+ total_time = time.time() - start_time
716
+ logging.info(f"Total time taken: {total_time} seconds")
717
+
718
+ # # ---------------------- Save Batch Output Records to JSON File ----------------------
719
+ # # Define output JSON file path
720
+ # t = time.localtime()
721
+ # batch_output_file = os.path.join(output_dir, f'test.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')
722
+
723
+ # # Save batch_output_records to JSON file
724
+ # with open(batch_output_file, 'w', encoding='utf-8') as f:
725
+ # json.dump(batch_output_records, f, ensure_ascii=False, indent=2)
726
+
727
+ # logging.info(f"Batch outputs saved to {batch_output_file}")
728
+
729
+ # Prepare output list for evaluation
730
+ output_list = [seq['output'] for seq in active_sequences]
731
+
732
+ response_dict["text"] = widget_output# Update the response dictionary with the first output
733
+
734
+ # # Run evaluation
735
+ # if dataset_name in ["eval", "gaia"]:
736
+ # run_evaluation_for_eval(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, 'test')
737
+ # else:
738
+ # run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, 'test')
739
+
740
+ # # ---------------------- Stage-wise Analysis ----------------------
741
+ # turn_files = os.listdir(output_dir)
742
+ # turn_files = [file for file in turn_files if file.startswith("turn_")]
743
+ # max_turn_file = max(turn_files, key=lambda x: int(re.search(r'turn_(\d+)', x).group(1)))
744
+
745
+ # max_turn_file_path = os.path.join(output_dir, max_turn_file)
746
+ # logging.info(f"max_turn_file_path: {max_turn_file_path}")
747
+ # stage_wise_analysis(model_path, max_turn_file_path)
748
+
749
+ # ---------------------- Update Search and URL Cache ----------------------
750
+ logging.info('Updating Search and URL Cache...')
751
+ # Load existing caches or initialize empty dictionaries
752
+ if os.path.exists(search_cache_path):
753
+ try:
754
+ with open(search_cache_path, 'r', encoding='utf-8') as f:
755
+ search_cache_new = json.load(f)
756
+ except Exception as e:
757
+ logging.info(f"Error loading search cache: {e}")
758
+ search_cache_new = {}
759
+ else:
760
+ search_cache_new = {}
761
+
762
+ if os.path.exists(url_cache_path):
763
+ try:
764
+ with open(url_cache_path, 'r', encoding='utf-8') as f:
765
+ url_cache_new = json.load(f)
766
+ except Exception as e:
767
+ logging.info(f"Error loading url cache: {e}")
768
+ url_cache_new = {}
769
+ else:
770
+ url_cache_new = {}
771
+
772
+ search_cache.update(search_cache_new)
773
+ url_cache.update(url_cache_new)
774
+
775
+ save_caches()
776
+
777
+ logging.info("Process completed.")
778
+
779
def parse_arguments():
    """Parse the command-line arguments for the demo client.

    Returns:
        tuple: (args, unknown) as produced by ``parser.parse_known_args()``,
        so extra arguments injected by the hosting environment are tolerated.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config",
        "-c",
        type=str,
        default="./demo_client_basic.json",
        help="path to configs",
    )
    parser.add_argument(
        "--user_avatar",
        "-a",
        type=str,
        default="./user_demo.png",
        help="path to user avatar",
    )
    parser.add_argument(
        "--log",
        "-l",
        type=str,
        # Was a pointless f-string literal (no placeholders); plain string.
        default="./chatweb.log",
        help="path to logs",
    )
    return parser.parse_known_args()
803
+
804
+
805
def load_model_configs(config_path):
    """Load the model configuration list from a JSON file.

    Args:
        config_path: Path to the JSON configuration file.

    Returns:
        The parsed JSON content (a list of model-config dicts).
    """
    # Explicit encoding so non-ASCII config values (avatars, labels) are
    # read identically on every platform, regardless of the locale default.
    with open(config_path, "r", encoding="utf-8") as f:
        return json.load(f)
808
+
809
+
810
def setup_sidebar(model_config_list):
    """Render the sidebar controls for per-model generation parameters.

    Args:
        model_config_list (list): List of model configuration dicts.

    Returns:
        tuple: (kwargs_list, visibility_list) — per-model generation kwargs
        and per-model visibility toggles, in the same order as the input.
    """
    kwargs_list, visibility_list = [], []
    with st.sidebar:
        st.header("Generate Parameters")
        for cfg in model_config_list:
            with st.expander(f'{cfg["model_name"]}'):
                params = {}
                for name, spec in cfg["generate_parameter"].items():
                    # A plain (non-dict) value is a fixed parameter, not a widget.
                    if not isinstance(spec, dict):
                        params[name] = spec
                        continue
                    widget_factory = getattr(st, spec["component"])
                    params[name] = widget_factory(
                        name,
                        key=f'{cfg["model_name"]}-{name}',
                        **spec["kwargs"],
                    )
                kwargs_list.append(params)
                toggle_state = st.toggle(
                    "启用",
                    value=cfg.get("visibility", True),
                    key=f'{cfg["model_name"]}-visibility',
                )
                visibility_list.append(toggle_state)
    return kwargs_list, visibility_list
845
+
846
+
847
def initialize_session_state():
    """Ensure the chat-history list exists in the Streamlit session state."""
    st.session_state.setdefault("chat", [])
850
+
851
+
852
def initialize_auth_session_state():
    """Initializes session state variables for authentication."""
    auth_defaults = {
        'logged_in': False,      # whether a user has authenticated
        'current_user': None,    # username of the authenticated user
        'page': 'login',         # navigation target: login, register, chat
    }
    for state_key, default_value in auth_defaults.items():
        if state_key not in st.session_state:
            st.session_state[state_key] = default_value
860
+
861
def display_chat_history(chat_container):
    """Render every stored chat turn inside the given container."""
    with chat_container:
        for past_turn in st.session_state["chat"]:
            display_chat(past_turn)
865
+
866
+
867
def handle_user_input(user_input, args, chat_container, model_config_list, kwargs_list, visibility_list):
    """Handle one user message: fan the prompt out to every visible model,
    stream each response into its own column, and record the turn.

    Args:
        user_input (str): The text the user submitted.
        args (argparse.Namespace): Parsed command-line arguments.
        chat_container: Streamlit container the chat renders into.
        model_config_list (list): Model configuration dicts.
        kwargs_list (list): Per-model generation-parameter dicts.
        visibility_list (list): Per-model visibility toggles.
    """
    if user_input:
        logging.info(f"User input received: {user_input}")
        chat = {"input": user_input, "avatar": args.user_avatar, "response": []}
        with chat_container:
            with st.chat_message("user", avatar=args.user_avatar):
                st.markdown(user_input, unsafe_allow_html=True)
            # One column per visible model.
            column_list = st.columns(sum(visibility_list))

            # NOTE(review): this event loop is created but never closed nor
            # set as the current loop — consider asyncio.run(...) or a
            # try/finally with loop.close().
            loop = asyncio.new_event_loop()
            waiting_list = []
            model_idx = 0
            for model_config, kwargs, visibility in zip(
                model_config_list, kwargs_list, visibility_list
            ):
                if not visibility:
                    continue
                column = column_list[model_idx]
                with column:
                    st.header(model_config["model_name"])
                    with st.chat_message("assistant", avatar=model_config["avatar"]):
                        widget = st.empty()
                        # Rebuild (user, assistant) history pairs for this model.
                        history = []
                        for history_chat in st.session_state["chat"]:
                            history.append(
                                (
                                    history_chat["input"],
                                    history_chat["response"][model_idx]["text"],
                                )
                            )
                        # prompt = tokenize_prompt(
                        #     history, user_input, model_config["model_path"]
                        # )
                        # Placeholder record; fetch_model_response fills "text" in.
                        chat["response"].append(
                            {
                                "model": model_config["model_name"],
                                "text": "",
                                "avatar": model_config["avatar"],
                            }
                        )
                        waiting_list.append(
                            fetch_model_response(widget, history, user_input, model_config, chat["response"][-1], **kwargs)
                        )
                model_idx += 1

            # NOTE(review): passing raw coroutines to asyncio.wait() is
            # deprecated and rejected on Python 3.11+; wrap each in
            # loop.create_task(...) first.
            loop.run_until_complete(asyncio.wait(waiting_list))
            st.session_state["chat"].append(chat)

            # log chat response
            for idx, chat_response in enumerate(chat["response"]):
                logging.info(f"Model: {chat_response['model']}, Response: {chat_response['text']}")
929
+
930
+
931
# Define the "tech-style" accent colors for the app header.
color_shendu = "#00BFFF"  # e.g. DeepSkyBlue — a tech-feel blue
color_sousuo = "#00FFFF"  # e.g. Cyan/Aqua — another bright tech-feel color
# Alternative combinations to try:
# color_shendu = "#7DF9FF"  # Electric Blue
# color_sousuo = "#00FF7F"  # SpringGreen

# Build the header HTML with inline CSS.
# An <h2> tag is used because it visually resembles st.header().
header_html = f"""
<h2 style="text-align: center; font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;">
<span style="color: {color_sousuo};">深度</span><span style="color: {color_shendu};">搜索系统</span>
</h2>
"""
945
+
946
+
947
+
948
+ # --- Authentication Pages ---
949
def show_login_page():
    """Render the login page; on success, navigate to the chat page."""
    # Custom HTML header (styled replacement for st.header).
    st.markdown(header_html, unsafe_allow_html=True)

    # st.header("深度搜索系统")
    st.subheader("登录")
    with st.form("login_form"):
        username = st.text_input("用户名", key="login_username")
        password = st.text_input("密码", type="password", key="login_password")
        submitted = st.form_submit_button("登录")

    if submitted:
        if not verify_user(username, password):
            st.error("用户名或密码错误!")
        else:
            st.session_state.logged_in = True
            st.session_state.current_user = username
            st.session_state.page = 'chat'  # Navigate to chat
            initialize_session_state()      # Fresh chat state for this user
            st.success("登录成功!")
            st.rerun()  # Rerun to reflect login state and show chat page

    if st.button("新用户注册", key="goto_register"):
        st.session_state.page = 'register'
        st.rerun()
975
+
976
def show_registration_page():
    """Render the registration page and handle new-user sign-up."""
    st.markdown(header_html, unsafe_allow_html=True)
    st.subheader("新用户注册")
    with st.form("registration_form"):
        new_username = st.text_input("用户名", key="reg_username")
        new_password = st.text_input("密码", type="password", key="reg_password")
        confirm_password = st.text_input("确认密码", type="password", key="reg_confirm_password")
        reg_submit_button = st.form_submit_button("注册")

    if reg_submit_button:
        if not new_username or not new_password:
            # Bug fix: the old message only mentioned the password even though
            # an empty username also lands in this branch.
            st.warning("用户名和密码不能为空!")
        elif new_password != confirm_password:
            # Bug fix: the old message said the password was wrong; the real
            # problem is the two password entries do not match.
            st.error("两次输入的密码不一致!")
        else:
            success, message = register_user(new_username, new_password)
            if success:
                st.success(message)
                st.session_state.page = 'login'  # Go to login page after registration
                st.rerun()
            else:
                st.error(message)

    if st.button("已经有了账户?请登录", key="goto_login"):
        st.session_state.page = 'login'
        st.rerun()
1002
+
1003
+
1004
def main():
    """Application entry point: parse args, set up logging, route pages."""
    args, _ = parse_arguments()
    setup_logging(args.log)
    logging.debug("Starting application")

    # Auth-related session state must exist before any routing decision.
    initialize_auth_session_state()

    if not st.session_state.get('logged_in', False):
        # Unauthenticated: route between login and registration, then stop.
        target = st.session_state.get('page', 'login')
        if target == 'register':
            show_registration_page()
        elif target == 'login':
            show_login_page()
        return

    # Authenticated: render the main chat application.
    st.title(TITLE)

    # Chat state is normally created during login; guard for direct entry.
    if "chat" not in st.session_state:
        initialize_session_state()

    model_config_list = load_model_configs(args.config)
    if not model_config_list:
        st.error("Failed to load model configurations. Chat functionality may be limited.")
        return

    kwargs_list, visibility_list = setup_sidebar(model_config_list)

    chat_container = st.container()
    display_chat_history(chat_container)

    user_input = st.chat_input(f"Ask anything, {st.session_state.get('current_user', 'User')}...")
    if user_input:  # chat_input yields None until the user submits
        handle_user_input(user_input, args, chat_container, model_config_list, kwargs_list, visibility_list)


if __name__ == "__main__":
    main()
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/add_eval.cpython-310.pyc ADDED
Binary file (21.2 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/bing_search.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/evaluate.cpython-310.pyc ADDED
Binary file (9.98 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/google_search.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/google_search.cpython-311.pyc ADDED
Binary file (16.7 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/prompts.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/prompts.cpython-311.pyc ADDED
Binary file (4.51 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/stage_wise_analysis.cpython-310.pyc ADDED
Binary file (21.3 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/__pycache__/stage_wise_analysis.cpython-311.pyc ADDED
Binary file (41 kB). View file
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/chatweb.log ADDED
The diff for this file is too large to render. See raw diff
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/demo_client_basic.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "model_name": "SimpDS-QwQ-32B",
4
+ "model_path": "/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41",
5
+ "api_url": "http://localhost:5126/generate",
6
+ "avatar": "./simplelog_demo.jpg",
7
+ "visibility": true,
8
+ "generate_parameter": {
9
+ "presence_penalty": {
10
+ "component": "number_input",
11
+ "kwargs": {
12
+ "min_value": -1.0,
13
+ "max_value": 1.0,
14
+ "value": 0.0
15
+ }
16
+ },
17
+ "frequency_penalty": {
18
+ "component": "number_input",
19
+ "kwargs": {
20
+ "min_value": -1.0,
21
+ "max_value": 1.0,
22
+ "value": 0.0
23
+ }
24
+ },
25
+ "max_tokens": {
26
+ "component": "number_input",
27
+ "kwargs": {
28
+ "min_value": 512,
29
+ "max_value": 32768,
30
+ "value": 20480
31
+ }
32
+ },
33
+ "temperature": {
34
+ "component": "number_input",
35
+ "kwargs": {
36
+ "min_value": 0.0,
37
+ "max_value": 1.0,
38
+ "value": 0.6
39
+ }
40
+ },
41
+ "top_p": {
42
+ "component": "number_input",
43
+ "kwargs": {
44
+ "min_value": 0.0,
45
+ "max_value": 1.0,
46
+ "value": 0.95
47
+ }
48
+ }
49
+ }
50
+ }
51
+ ]
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/evaluate.py ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ sys.path.append("..")
3
+
4
+ import re
5
+ import json
6
+ import numpy as np
7
+ from collections import Counter
8
+ import string
9
+ import os, time
10
+ from collections import defaultdict
11
+ from lcb_runner.evaluation import codegen_metrics
12
+ from utils.math_equivalence import is_equiv
13
+
14
+
15
def extract_answer(output, mode='gen'):
    """Pull the final answer out of a raw model generation.

    Args:
        output: Model output text; ``None`` is treated as the string "None".
        mode: One of 'codegen', 'infogen', 'gen', 'choose', 'qa'; selects the
            extraction strategy.

    Returns:
        str: The extracted answer, or '' when nothing could be extracted.
    """
    if output is None:
        output = "None"
    answer = ''

    if mode == 'codegen':
        # Take the last fenced ```python ... ``` block.
        code_blocks = re.findall(r'```python\s*(.*?)\s*```', output,
                                 re.DOTALL | re.IGNORECASE)
        if code_blocks:
            answer = code_blocks[-1].strip()
        return answer

    if mode == 'infogen':
        # Reasoning distilled from web pages: keep everything after the
        # final-information / modified-steps marker.
        info_marker = "**Final Information**"
        steps_marker = "**Modified Reasoning Steps**"
        if info_marker in output:
            return output.split(info_marker)[-1].replace("\n", "").strip("```").strip()
        if steps_marker in output:
            return output.split(steps_marker)[-1].strip("```").strip()
        # No marker found: fall back to the raw output.
        return output

    # 'gen', 'choose' and 'qa' all read the last \boxed{...} expression.
    boxed = re.findall(r'\\boxed\{(.*)\}', output)
    if boxed:
        answer = boxed[-1]
    if mode in ['choose', 'qa']:
        # Multiple-choice answers may be wrapped in \text{...}; unwrap the
        # innermost one and drop surrounding parentheses.
        wrapped = re.findall(r'\\text\{(.*)\}', answer)
        if wrapped:
            answer = wrapped[-1]
        answer = answer.strip("()")
    return answer
52
+
53
+
54
def normalize_answer(text):
    """Lowercase *text* and collapse all whitespace runs into single spaces."""
    collapsed = " ".join(text.strip().split())
    return collapsed.lower()
58
+
59
def normalize_answer_qa(s):
    """Normalize a QA answer: lowercase, drop punctuation, remove English
    articles (a/an/the) and squeeze whitespace — the standard SQuAD-style
    normalization used before computing EM/F1."""
    text = s.lower()
    text = "".join(ch for ch in text if ch not in string.punctuation)
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.strip().split())
70
+
71
+
72
def evaluate_predictions(output, labeled_answer, mode='gen'):
    """Score a single model output against the gold answer.

    Args:
        output: Raw model generation (may be ``None``).
        labeled_answer: Gold answer; a list of acceptable strings in 'qa'
            mode, a single string in all other modes.
        mode: Extraction mode forwarded to :func:`extract_answer`.

    Returns:
        tuple: ``(final_metric, pred_answer)`` where ``final_metric`` holds:
            - is_valid_answer: a non-empty prediction was extracted
            - acc: gold answer is contained in the prediction
            - em: exact match after normalization
            - f1: token-level F1
            - math_equal: math equivalence via is_equiv (non-'qa' modes only;
              stays 0 in 'qa' mode, matching the original behavior)
    """
    final_metric = {"is_valid_answer": False, "acc": 0, "em": 0, "f1": 0, 'math_equal': 0}
    pred_answer = extract_answer(output, mode=mode)
    if pred_answer != '':
        final_metric["is_valid_answer"] = True

    if mode == 'qa':
        normalized_pred_answer = normalize_answer_qa(pred_answer)
        # Keep the best score over all acceptable gold answers.
        for answer in labeled_answer:
            normalized_ground_truth = normalize_answer_qa(answer)
            em = int(normalized_pred_answer == normalized_ground_truth)
            acc = int(normalized_ground_truth in normalized_pred_answer)

            # Multiset intersection of tokens for the F1 computation.
            prediction_tokens = normalized_pred_answer.split()
            ground_truth_tokens = normalized_ground_truth.split()
            common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
            num_same = sum(common.values())
            if num_same == 0:
                # No overlap: this gold answer cannot improve any metric
                # (em/acc are necessarily 0 too when nothing is shared).
                continue
            precision = 1.0 * num_same / len(prediction_tokens)
            recall = 1.0 * num_same / len(ground_truth_tokens)
            f1 = (2 * precision * recall) / (precision + recall)
            # FIX: the original used eval(k) over the local names, which is
            # fragile and opaque; update the running maxima directly instead.
            final_metric["em"] = max(em, final_metric["em"])
            final_metric["acc"] = max(acc, final_metric["acc"])
            final_metric["f1"] = max(f1, final_metric["f1"])

    else:
        normalized_pred_answer = normalize_answer(pred_answer)
        normalized_ground_truth = normalize_answer(labeled_answer)

        em = int(normalized_pred_answer == normalized_ground_truth)
        acc = int(normalized_ground_truth in normalized_pred_answer)

        prediction_tokens = normalized_pred_answer.split()
        ground_truth_tokens = normalized_ground_truth.split()
        common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
        num_same = sum(common.values())
        if num_same == 0:
            f1 = 0
        else:
            precision = 1.0 * num_same / len(prediction_tokens) if len(prediction_tokens) > 0 else 0
            recall = 1.0 * num_same / len(ground_truth_tokens) if len(ground_truth_tokens) > 0 else 0
            if (precision + recall) == 0:
                f1 = 0
            else:
                f1 = (2 * precision * recall) / (precision + recall)

        final_metric["em"] = em
        final_metric["acc"] = acc
        final_metric["f1"] = f1

        # Math equivalence is only meaningful for single-string answers.
        final_metric["math_equal"] = is_equiv(normalized_pred_answer, normalized_ground_truth)

    return final_metric, pred_answer
135
+
136
+
137
+
138
def run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split, apply_backoff=False):
    """Score model generations for *dataset_name* and write results to disk.

    For 'livecode' the extracted code is executed against the item's public
    test cases via codegen_metrics (pass@1); for every other dataset the
    textual answer is scored with em/acc/f1/math_equal.  Two JSON files are
    written into *output_dir*: per-item predictions and aggregated metrics.

    Args:
        filtered_data: Dataset items; mutated in place with 'Output',
            'Pred_Answer', 'Metrics' and 'Question' fields.
        input_list: Prompts aligned with filtered_data.
        output_list: Generations — plain strings or objects with
            ``.outputs[0].text`` (vLLM-style; TODO confirm exact type).
        dataset_name: Dataset identifier controlling answer extraction.
        output_dir: Directory for the output JSON files.
        total_time: Total inference wall-clock time (seconds), for latency.
        split: Split name embedded in the output file names.
        apply_backoff: When True, *output_dir* is reused as the result file
            name itself — NOTE(review): looks like callers pass a .json file
            path in that case; confirm against callers.
    """
    if dataset_name == 'livecode':
        # Prepare samples and generations for codegen_metrics
        samples_list = []
        generations_list = []

        # Collect difficulty levels for per-difficulty metrics
        difficulties = []
        per_difficulty_count = {}
        num_valid_answer = 0

        for item, input_prompt, result in zip(filtered_data, input_list, output_list):
            # result is either raw text or a generation object
            if type(result) == str:
                item['Output'] = result
            else:
                item['Output'] = result.outputs[0].text
            difficulty = item.get("difficulty", "Unknown")
            difficulties.append(difficulty)
            # Track valid-answer counts per difficulty
            if difficulty not in per_difficulty_count.keys():
                per_difficulty_count[difficulty] = 0

            pred_code = extract_answer(item['Output'], mode='codegen')
            if pred_code != '':
                num_valid_answer += 1
                per_difficulty_count[difficulty] += 1
            # Each item carries its public test cases as a JSON string
            public_test_cases = json.loads(item.get("public_test_cases", "{}"))

            inputs, outputs = [], []
            for case in public_test_cases:
                inputs.append(case["input"])
                outputs.append(case["output"])

            sample = {
                "input_output": json.dumps({
                    "inputs": inputs,
                    "outputs": outputs
                }),
            }

            samples_list.append(sample)
            generations_list.append([pred_code])
            item['Pred_Answer'] = pred_code
            item['Question'] = input_prompt

        # Call codegen_metrics with pass@1
        metrics, results, final_metadata = codegen_metrics(
            samples_list,
            generations_list,
            k_list=[1],              # evaluate only the top-1 generation
            num_process_evaluate=2,  # parallel evaluation processes
            timeout=10,              # per-sample execution timeout (seconds)
            debug=False,
        )

        # Extract overall and per-item pass@1
        pass_at_1 = metrics.get('pass@1', 0.0)
        detail_pass_at_1 = metrics['detail']['pass@1']

        for item, pass1, res, meta in zip(filtered_data, detail_pass_at_1.values(), results.values(), final_metadata):
            item['Metrics'] = {'pass@1': pass1}
            item['Results'] = res
            item['Final_metadata'] = meta

        # Group per-item pass@1 by difficulty
        difficulty_metrics = defaultdict(list)
        for idx, difficulty in enumerate(difficulties):
            pass1 = detail_pass_at_1[idx]
            difficulty_metrics[difficulty].append(pass1)

        # Compute overall pass@1
        overall_metrics = {
            'pass@1': pass_at_1,
            'num_valid_answer': f'{num_valid_answer} of {len(input_list)}',
            'query_latency': f'{(total_time / len(input_list) * 1000):.0f} ms',
        }

        # Compute per-difficulty pass@1
        per_difficulty_metrics = {}
        for difficulty, passes in difficulty_metrics.items():
            avg_pass = np.mean(passes) if len(passes) > 0 else 0.0
            # NOTE: this rebinds num_valid_answer from the overall count above
            num_valid_answer = per_difficulty_count[difficulty]
            per_difficulty_metrics[difficulty] = {
                'pass@1': avg_pass,
                'num_valid_answer': f'{num_valid_answer} of {len(passes)}'
            }

        # Save the metrics (per-difficulty stored under 'per_domain')
        final_metrics = {
            'overall': overall_metrics,
            'per_domain': per_difficulty_metrics
        }

    else:
        # Existing evaluation for other datasets
        avg_em, avg_acc, avg_f1, avg_math = [], [], [], []
        num_valid_answer = 0

        # If the dataset is GPQA, track metrics per domain
        domain_metrics = {}

        for item, input_prompt, result in zip(filtered_data, input_list, output_list):
            if type(result) == str:
                item['Output'] = result
            else:
                item['Output'] = result.outputs[0].text
            # Pick the gold-answer field and extraction mode per dataset
            if dataset_name in ['gpqa', 'medmcqa']:
                labeled_answer = item["Correct Choice"]
                mode = 'choose'
            elif dataset_name in ['math500', 'aime', 'amc']:
                labeled_answer = item["answer"]
                mode = 'gen'
            elif dataset_name in ['dpo_484', 'no_error_data_871', 'eval_old_500', 'gaia_level3', 'gaia', 'hle','frames', 'realqa', 'realqa_new', 'syn_en', 'syn_zh','musique_syn', 'eval', 'new', 'chinese_simpleqa', 'simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
                labeled_answer = item["answer"]
                mode = 'qa'
            elif dataset_name in ['pubhealth']:
                labeled_answer = item["answer"]
                mode = 'choose'
            else:
                # Unknown datasets fall back to QA-style scoring
                labeled_answer = item["answer"]
                mode = 'qa'

            metric, pred_answer = evaluate_predictions(output=item['Output'], labeled_answer=labeled_answer, mode=mode)
            item['Pred_Answer'] = pred_answer
            item['Metrics'] = metric
            item['Question'] = input_prompt

            # A GPQA multiple-choice prediction longer than one character is
            # treated as invalid (not a single choice letter)
            my_method_valid = (pred_answer != '' and not (mode == 'choose' and dataset_name == 'gpqa' and len(pred_answer) > 1))

            avg_em.append(metric['em'])
            avg_acc.append(metric['acc'])
            avg_f1.append(metric['f1'])
            avg_math.append(metric['math_equal'])

            if my_method_valid:
                num_valid_answer += 1

            # If the dataset is GPQA, attempt to track metrics per domain
            if dataset_name == 'gpqa':
                domain = item.get("High-level domain", "Unknown")
                if domain not in domain_metrics:
                    domain_metrics[domain] = {'em': [], 'acc': [], 'f1': [], 'math_equal': [], 'num_valid_answer': 0, 'total_num': 0}
                domain_metrics[domain]['total_num'] += 1
                domain_metrics[domain]['em'].append(metric['em'])
                domain_metrics[domain]['acc'].append(metric['acc'])
                domain_metrics[domain]['f1'].append(metric['f1'])
                domain_metrics[domain]['math_equal'].append(metric['math_equal'])
                if my_method_valid:
                    domain_metrics[domain]['num_valid_answer'] += 1

        # NOTE: these names are recomputed again just before saving below;
        # this first computation is effectively dead code kept for parity
        t = time.localtime()
        result_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.json'
        metrics_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.metrics.json'

        # Compute overall metrics
        overall_results = {
            'em': np.mean(avg_em) if len(avg_em) > 0 else 0.0,
            'acc': np.mean(avg_acc) if len(avg_acc) > 0 else 0.0,
            'f1': np.mean(avg_f1) if len(avg_f1) > 0 else 0.0,
            # NOTE: guard checks len(avg_em) rather than len(avg_math); the
            # two lists always grow together, so the result is the same
            'math_equal': np.mean(avg_math) if len(avg_em) > 0 else 0.0,
            'num_valid_answer': f'{num_valid_answer} of {len(input_list)}',
            'query_latency': f'{(total_time / len(input_list) * 1000):.0f} ms',
        }

        # If the dataset is GPQA, output average metrics per domain
        domain_avg_metrics = {}
        if dataset_name == 'gpqa':
            for dm, m in domain_metrics.items():
                domain_avg_metrics[dm] = {
                    'em': np.mean(m['em']) if len(m['em']) > 0 else 0,
                    'acc': np.mean(m['acc']) if len(m['acc']) > 0 else 0,
                    'f1': np.mean(m['f1']) if len(m['f1']) > 0 else 0,
                    'math_equal': np.mean(m['math_equal']) if len(m['math_equal']) > 0 else 0,
                    'num_valid_answer': f'{m["num_valid_answer"]} of {m["total_num"]}'
                }

        # Save overall and per-domain metrics
        final_metrics = {'overall': overall_results}
        if dataset_name == 'gpqa':
            final_metrics['per_domain'] = domain_avg_metrics

    # Build timestamped output file names (shared by both branches)
    t = time.localtime()
    result_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.json'
    metrics_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.metrics.json'
    if apply_backoff:
        # NOTE(review): output_dir appears to be a file path in backoff mode;
        # os.path.join below returns it unchanged when absolute — confirm
        result_json_name = output_dir
        metrics_json_name = output_dir.replace('.json', '.metrics.backoff.json')

    # Save prediction results and metrics
    with open(os.path.join(output_dir, result_json_name), mode='w', encoding='utf-8') as json_file:
        json.dump(filtered_data, json_file, indent=4, ensure_ascii=False)

    with open(os.path.join(output_dir, metrics_json_name), mode='w', encoding='utf-8') as json_file:
        json.dump(final_metrics, json_file, indent=4, ensure_ascii=False)
340
+
341
+
342
+
343
def run_evaluation_for_eval(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split, apply_backoff=False):
    """Variant of run_evaluation restricted to the curated eval datasets;
    aggregates metrics per item 'source' field instead of per GPQA domain.

    Args:
        filtered_data: Dataset items; mutated in place with 'Output',
            'Pred_Answer', 'Metrics' and 'Question' fields.
        input_list: Prompts aligned with filtered_data.
        output_list: Generations — strings or objects with ``.outputs[0].text``.
        dataset_name: Must be one of the supported eval dataset names.
        output_dir: Directory for the output JSON files.
        total_time: Total inference wall-clock time (seconds), for latency.
        split: Split name embedded in the output file names.
        apply_backoff: When True, *output_dir* is reused as the result file
            name itself — NOTE(review): confirm callers pass a .json path.

    Raises:
        ValueError: If *dataset_name* is not a supported eval dataset.
    """
    if dataset_name not in ['dpo_484', 'no_error_data_871', 'eval_old_500', 'gaia_level3', 'gaia', 'hle','eval', 'musique_syn', 'realqa_new', 'realqa']:
        raise ValueError(f"Unsupported dataset: {dataset_name}")
    else:
        # Existing evaluation for other datasets
        avg_em, avg_acc, avg_f1, avg_math = [], [], [], []
        num_valid_answer = 0

        # Track metrics per item source
        source_metrics = {}

        for item, input_prompt, result in zip(filtered_data, input_list, output_list):
            if type(result) == str:
                item['Output'] = result
            else:
                item['Output'] = result.outputs[0].text
            # Pick the gold-answer field and extraction mode per dataset
            if dataset_name in ['gpqa', 'medmcqa']:
                labeled_answer = item["Correct Choice"]
                mode = 'choose'
            elif dataset_name in ['math500', 'aime', 'amc']:
                labeled_answer = item["answer"]
                mode = 'gen'
            elif dataset_name in ['dpo_484', 'no_error_data_871', 'eval_old_500', 'gaia_level3', 'gaia', 'hle','frames', 'realqa', 'realqa_new', 'syn_en', 'syn_zh', 'eval','musique_syn', 'new', 'chinese_simpleqa', 'simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
                labeled_answer = item["answer"]
                mode = 'qa'
            elif dataset_name in ['pubhealth']:
                labeled_answer = item["answer"]
                mode = 'choose'
            else:
                raise ValueError(f"Unknown dataset_name: {dataset_name}")

            metric, pred_answer = evaluate_predictions(output=item['Output'], labeled_answer=labeled_answer, mode=mode)
            item['Pred_Answer'] = pred_answer
            item['Metrics'] = metric
            item['Question'] = input_prompt

            # A GPQA multiple-choice prediction longer than one character is
            # treated as invalid (not a single choice letter)
            my_method_valid = (pred_answer != '' and not (mode == 'choose' and dataset_name == 'gpqa' and len(pred_answer) > 1))

            avg_em.append(metric['em'])
            avg_acc.append(metric['acc'])
            avg_f1.append(metric['f1'])
            avg_math.append(metric['math_equal'])

            if my_method_valid:
                num_valid_answer += 1

            # Accumulate per-source metric lists
            if dataset_name in ['dpo_484', 'no_error_data_871', 'eval_old_500', 'gaia_level3', 'gaia', 'hle','eval', 'musique_syn', 'realqa_new', 'realqa']:
                source = item.get("source", "Unknown")
                if source not in source_metrics:
                    source_metrics[source] = {'em': [], 'acc': [], 'f1': [], 'math_equal': [], 'num_valid_answer': 0, 'total_num': 0}
                source_metrics[source]['total_num'] += 1
                source_metrics[source]['em'].append(metric['em'])
                source_metrics[source]['acc'].append(metric['acc'])
                source_metrics[source]['f1'].append(metric['f1'])
                source_metrics[source]['math_equal'].append(metric['math_equal'])
                if my_method_valid:
                    source_metrics[source]['num_valid_answer'] += 1

        # NOTE: these names are recomputed again just before saving below;
        # this first computation is effectively dead code kept for parity
        t = time.localtime()
        result_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.json'
        metrics_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.metrics.json'

        # Compute overall metrics
        overall_results = {
            'em': np.mean(avg_em) if len(avg_em) > 0 else 0.0,
            'acc': np.mean(avg_acc) if len(avg_acc) > 0 else 0.0,
            'f1': np.mean(avg_f1) if len(avg_f1) > 0 else 0.0,
            # NOTE: guard checks len(avg_em) rather than len(avg_math); the
            # two lists always grow together, so the result is the same
            'math_equal': np.mean(avg_math) if len(avg_em) > 0 else 0.0,
            'num_valid_answer': f'{num_valid_answer} of {len(input_list)}',
            'query_latency': f'{(total_time / len(input_list) * 1000):.0f} ms',
        }

        # Output average metrics per source
        source_avg_metrics = {}
        if dataset_name in ['dpo_484', 'no_error_data_871', 'eval_old_500', 'gaia_level3', 'gaia', 'hle','eval', 'musique_syn', 'realqa_new', 'realqa']:
            for dm, m in source_metrics.items():
                source_avg_metrics[dm] = {
                    'em': np.mean(m['em']) if len(m['em']) > 0 else 0,
                    'acc': np.mean(m['acc']) if len(m['acc']) > 0 else 0,
                    'f1': np.mean(m['f1']) if len(m['f1']) > 0 else 0,
                    'math_equal': np.mean(m['math_equal']) if len(m['math_equal']) > 0 else 0,
                    'num_valid_answer': f'{m["num_valid_answer"]} of {m["total_num"]}'
                }

        # Save overall and per-source metrics
        final_metrics = {'overall': overall_results}
        if dataset_name in ['dpo_484', 'no_error_data_871', 'eval_old_500', 'gaia_level3', 'gaia', 'hle', 'eval', 'musique_syn', 'realqa_new', 'realqa']:
            final_metrics['per_source'] = source_avg_metrics

        # Build timestamped output file names
        t = time.localtime()
        result_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.json'
        metrics_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.metrics.json'
        if apply_backoff:
            # NOTE(review): output_dir appears to be a file path in backoff
            # mode; os.path.join below returns it unchanged when absolute
            result_json_name = output_dir
            metrics_json_name = output_dir.replace('.json', '.metrics.backoff.json')

        # Save prediction results and metrics
        with open(os.path.join(output_dir, result_json_name), mode='w', encoding='utf-8') as json_file:
            json.dump(filtered_data, json_file, indent=4, ensure_ascii=False)

        with open(os.path.join(output_dir, metrics_json_name), mode='w', encoding='utf-8') as json_file:
            json.dump(final_metrics, json_file, indent=4, ensure_ascii=False)
450
+
451
+
452
+
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/google_search.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import requests
4
+ from requests.exceptions import Timeout
5
+ from bs4 import BeautifulSoup
6
+ from tqdm import tqdm
7
+ import time
8
+ import concurrent
9
+ from concurrent.futures import ThreadPoolExecutor
10
+ import pdfplumber
11
+ from io import BytesIO
12
+ import re
13
+ import string
14
+ from typing import Optional, Tuple
15
+ from nltk.tokenize import sent_tokenize
16
+
17
+ # os.environ['http_proxy'] = 'http://127.0.0.1:7890'
18
+ # os.environ['https_proxy'] = 'http://127.0.0.1:7890'
19
+
20
+
21
# ----------------------- Custom Headers -----------------------
# Browser-like request headers so scraped sites are less likely to block us.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/58.0.3029.110 Safari/537.36',
    'Referer': 'https://www.google.com/',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1'
}

# Shared HTTP session: reuses connections and carries the headers above.
session = requests.Session()
session.headers.update(headers)
36
+
37
+
38
+
39
def remove_punctuation(text: str) -> str:
    """Strip every ASCII punctuation character from *text*."""
    table = str.maketrans("", "", string.punctuation)
    return text.translate(table)
42
+
43
def f1_score(true_set: set, pred_set: set) -> float:
    """Word-overlap F1 between two sets of tokens (0.0 when disjoint)."""
    overlap = len(true_set & pred_set)
    if overlap == 0:
        return 0.0
    precision = overlap / len(pred_set)
    recall = overlap / len(true_set)
    return 2 * precision * recall / (precision + recall)
51
+
52
def extract_snippet_with_context(full_text: str, snippet: str, context_chars: int = 2500) -> Tuple[bool, str]:
    """Locate the sentence in *full_text* that best matches *snippet* and
    return it together with surrounding context.

    Args:
        full_text: Full page text (only the first 50k characters are scanned).
        snippet: Search-result snippet to match against.
        context_chars: Characters of context kept on each side of the match.

    Returns:
        Tuple[bool, str]: ``(True, context)`` when a sentence scores above
        the overlap threshold; ``(False, fallback_prefix_or_error)`` otherwise.
    """
    try:
        full_text = full_text[:50000]

        # Normalise the snippet to a bag of lowercase, punctuation-free words.
        snippet_words = set(remove_punctuation(snippet.lower()).split())

        best_sentence = None
        best_f1 = 0.2  # minimum word-overlap F1 required to count as a match

        # Split into sentences with nltk and score each against the snippet.
        for sentence in sent_tokenize(full_text):
            candidate_words = set(remove_punctuation(sentence.lower()).split())
            score = f1_score(snippet_words, candidate_words)
            if score > best_f1:
                best_f1 = score
                best_sentence = sentence

        if not best_sentence:
            # Nothing was close enough: fall back to a prefix of the text.
            return False, full_text[:context_chars * 2]

        # Expand the best sentence to a window of context_chars on each side.
        match_start = full_text.find(best_sentence)
        match_end = match_start + len(best_sentence)
        window_start = max(0, match_start - context_chars)
        window_end = min(len(full_text), match_end + context_chars)
        return True, full_text[window_start:window_end]
    except Exception as e:
        return False, f"Failed to extract snippet context due to {str(e)}"
99
+
100
def extract_text_from_url(url, use_jina=False, jina_api_key=None, snippet: Optional[str] = None):
    """
    Extract text from a URL. If a snippet is provided, extract the context related to it.

    Args:
        url (str): URL of a webpage or PDF.
        use_jina (bool): Whether to use the Jina reader API for extraction.
        jina_api_key (Optional[str]): Bearer token for the Jina reader API.
        snippet (Optional[str]): The snippet to search for.

    Returns:
        str: Extracted text or context; an error-message string on failure.
    """
    try:
        if use_jina:
            # Fetch the page through the Jina reader API as markdown.
            jina_headers = {
                'Authorization': f'Bearer {jina_api_key}',
                'X-Return-Format': 'markdown',
                # 'X-With-Links-Summary': 'true'
            }
            response = requests.get(f'https://r.jina.ai/{url}', headers=jina_headers).text
            # Remove inline URLs and normalise markdown separators/whitespace.
            pattern = r"\(https?:.*?\)|\[https?:.*?\]"
            text = re.sub(pattern, "", response).replace('---','-').replace('===','=').replace(' ',' ').replace(' ',' ')
            print("use jina to extract text successfully")
        else:
            response = session.get(url, timeout=20)  # Set timeout to 20 seconds
            response.raise_for_status()  # Raise HTTPError if the request failed
            # PDFs get their own extraction path.
            content_type = response.headers.get('Content-Type', '')
            if 'pdf' in content_type:
                print("Extracting text from PDF...")
                return extract_pdf_text(url)
            # Try the faster lxml parser, fall back to the builtin html.parser.
            try:
                soup = BeautifulSoup(response.text, 'lxml')
            except Exception:
                print("lxml parser not found or failed, falling back to html.parser")
                soup = BeautifulSoup(response.text, 'html.parser')
            text = soup.get_text(separator=' ', strip=True)

        if snippet:
            # Narrow the page text down to the region matching the snippet.
            success, context = extract_snippet_with_context(text, snippet)
            if success:
                print("use extract_snippet_with_context to extract text successfully")
                return context
            else:
                print("use extract_snippet_with_context to extract text failed")
                return text
        else:
            # No snippet: return a truncated prefix of the page text.
            return text[:8000]
    except requests.exceptions.HTTPError as http_err:
        return f"HTTP error occurred: {http_err}"
    except requests.exceptions.ConnectionError:
        return "Error: Connection error occurred"
    except requests.exceptions.Timeout:
        return "Error: Request timed out after 20 seconds"
    except Exception as e:
        return f"Unexpected error: {str(e)}"
165
+
166
def fetch_page_content(urls, max_workers=24, use_jina=False, jina_api_key=None, snippets: Optional[dict] = None):
    """
    Concurrently fetch and extract text content from multiple URLs.

    Args:
        urls (list): List of URLs to scrape.
        max_workers (int): Maximum number of concurrent threads.
        use_jina (bool): Whether to use the Jina reader API for extraction.
        jina_api_key (Optional[str]): API key used when *use_jina* is True.
        snippets (Optional[dict]): A dictionary mapping URLs to their
            respective snippets (used to narrow the extracted text).

    Returns:
        dict: A dictionary mapping each URL to its extracted content or an
        error message string.
    """
    results = {}
    # FIX: max_workers was previously hard-coded to 20 here, silently
    # ignoring the caller's argument; the parameter is now honored.
    print(f"max_workers: {max_workers}")
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {
            executor.submit(extract_text_from_url, url, use_jina, jina_api_key, snippets.get(url) if snippets else None): url
            for url in urls
        }
        # tqdm displays progress as futures complete.
        for future in tqdm(concurrent.futures.as_completed(futures), desc="Fetching URLs", total=len(urls)):
            url = futures[future]
            # FIX: the old retry loop re-called future.result(), which caches
            # its outcome, so a failed future could never succeed on retry.
            try:
                results[url] = future.result()
            except Exception as exc:
                results[url] = f"Error fetching {url}: {exc}"
                time.sleep(0.2)  # simple rate limiting after a failure
    return results
206
+
207
+
208
# Local HTTP(S) proxy used for outbound search requests
# (assumes a proxy listening on 127.0.0.1:7890 — TODO confirm deployment).
proxies = {
    "http": "http://127.0.0.1:7890",
    "https": "http://127.0.0.1:7890"
}
212
+
213
+
214
def google_web_search(query, subscription_key, endpoint, market='en-US', language='en', exclude_urls=None, timeout=2000):
    """
    Perform a web search via the configured search API, retrying on failure.

    Args:
        query (str): Search query.
        subscription_key (str): API key sent in the X-API-KEY header.
        endpoint (str): Search API endpoint URL.
        market (str): Market, e.g., "en-US" or "zh-CN".
        language (str): Language of the results, e.g., "en".
        exclude_urls (Optional[list]): Sites to exclude via "-site:" operators.
        timeout (int or float or tuple): Request timeout in seconds; may be a
            single value or a (connect, read) tuple.

    Returns:
        dict: JSON response of the search results, or None after 20 failed
        attempts (timeouts or request errors).
    """
    # FIX: avoid a mutable default argument; treat None as "no exclusions".
    if exclude_urls is None:
        exclude_urls = []
    if exclude_urls:
        for site in exclude_urls:
            query += f" -site:{site}"
        # FIX: corrected "qeury" typo in the log message.
        print(f"query: {query}, exclude_urls: {exclude_urls}")

    payload = json.dumps({
        "q": query,               # the search query
        "num": 11,                # number of results requested
        "mkt": market,            # market
        "setLang": language,      # result language
        "textDecorations": True,  # enable text decorations
        "textFormat": "HTML"      # response text format
    })
    print(f"query: {query}")

    # Local name renamed so it no longer shadows the module-level `headers`.
    request_headers = {
        'X-API-KEY': subscription_key,
        'Content-Type': 'application/json'
    }
    error_cnt = 0
    while error_cnt < 20:  # give up after 20 failed attempts
        try:
            response = requests.request("POST", endpoint, headers=request_headers, data=payload, proxies=proxies, timeout=timeout)
            response.raise_for_status()  # raise on 4xx/5xx status codes
            return response.json()
        except Timeout:
            error_cnt += 1
            print(f"error_cnt: {error_cnt}, Bing Web Search request timed out ({timeout} seconds) for query: {query}")
            time.sleep(5)
        except requests.exceptions.RequestException as e:
            error_cnt += 1
            print(f"error_cnt: {error_cnt}, Error occurred during Bing Web Search request: {e}, payload: {payload}")
            time.sleep(5)
    # FIX: corrected "qery" typo; return None explicitly (was implicit).
    print(f"query: {query} has tried {error_cnt} times without success, just skip it.")
    return None
+
278
+
279
def extract_pdf_text(url):
    """
    Extract text from a PDF.

    Args:
        url (str): URL of the PDF file.

    Returns:
        str: Extracted text content (truncated) or an error message.
    """
    try:
        response = session.get(url, timeout=20)  # Set timeout to 20 seconds
        if response.status_code != 200:
            return f"Error: Unable to retrieve the PDF (status code {response.status_code})"

        # Open the PDF file using pdfplumber
        with pdfplumber.open(BytesIO(response.content)) as pdf:
            full_text = ""
            for page in pdf.pages:
                text = page.extract_text()
                # Pages with no extractable text yield None; skip them.
                if text:
                    full_text += text

        # Limit the output to the first 600 whitespace-separated tokens.
        cleaned_text = ' '.join(full_text.split()[:600])
        return cleaned_text
    except requests.exceptions.Timeout:
        return "Error: Request timed out after 20 seconds"
    except Exception as e:
        return f"Error: {str(e)}"
309
+
310
+ # def extract_relevant_info(search_results):
311
+ # """
312
+ # Extract relevant information from Bing search results.
313
+
314
+ # Args:
315
+ # search_results (dict): JSON response from the Bing Web Search API.
316
+
317
+ # Returns:
318
+ # list: A list of dictionaries containing the extracted information.
319
+ # """
320
+ # useful_info = []
321
+
322
+ # if 'webPages' in search_results and 'value' in search_results['webPages']: # value 通常是一个列表,包含了搜索结果的每个页面信息
323
+ # for id, result in enumerate(search_results['webPages']['value']):
324
+ # info = {
325
+ # 'id': id + 1, # Increment id for easier subsequent operations 为每个结果分配一个 id,id + 1 是为了让 ID 从 1 开始,而不是从 0 开始。这对后续操作更直观
326
+ # 'title': result.get('name', ''), # 每个搜索结果中提取标题
327
+ # 'url': result.get('url', ''), # 每个搜索结果中提取 URL
328
+ # 'site_name': result.get('siteName', ''), # 每个搜索结果中提取站点名称
329
+ # 'date': result.get('datePublished', '').split('T')[0], # 提取搜索结果的发布时间
330
+ # 'snippet': result.get('snippet', ''), # Remove HTML tags : 提取搜索结果的简短描述(即摘要或片段),result.get('snippet', '')。这里的 snippet 可能包含 HTML 标签,因此需要在后续的处理中可能会清除这些标签
331
+ # # Add context content to the information
332
+ # 'context': '' # Reserved field to be filled later
333
+ # }
334
+ # useful_info.append(info)
335
+
336
+ # return useful_info
337
+
338
def extract_relevant_info(search_results):
    """
    Extract relevant information from Serper (Google-style) search results.

    Args:
        search_results (dict): JSON response from the search API, or None when
            the upstream request failed.

    Returns:
        list: A list of dictionaries containing the extracted information,
            one per organic hit, in rank order.
    """
    useful_info = []

    # A failed/cached-failed search may hand us None instead of a dict.
    if search_results is None:  # fix: identity check instead of `== None`
        return useful_info

    if 'organic' in search_results:
        # 'organic' is a list with one entry per search hit.
        for idx, result in enumerate(search_results['organic']):
            useful_info.append({
                'id': idx + 1,  # 1-based ids are friendlier for downstream prompts
                'title': result.get('title', ''),
                'url': result.get('link', ''),
                'site_name': result.get('siteName', ''),
                # 'datePublished' looks like '2020-01-02T...'; keep only the date part
                'date': result.get('datePublished', '').split('T')[0],
                'snippet': result.get('snippet', ''),  # may still contain HTML tags
                'context': ''  # reserved field, filled in later
            })
    else:
        print("No organic results found.")
    print(f"len of useful_info: {len(useful_info)}")
    return useful_info
370
+
371
+
372
+ # ------------------------------------------------------------
373
+
374
if __name__ == "__main__":
    # Smoke test for the search helpers.
    import os

    # SECURITY FIX: the Serper API key (and a Jina key in old comments) used to
    # be hard-coded here. Secrets must never be committed to source control;
    # read the key from the environment instead.
    serper_api_key = os.environ.get("SERPER_API_KEY")
    if not serper_api_key:
        raise ValueError("Please set the SERPER_API_KEY environment variable.")

    # Run one search through the Serper endpoint and print the raw JSON payload.
    result = bing_web_search(
        "when does season 14 of grey's anatomy come out",
        serper_api_key,
        "https://google.serper.dev/search",
    )
    print(result)
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/inference.py ADDED
@@ -0,0 +1,774 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import time
4
+ import re
5
+ from tqdm import tqdm
6
+ import numpy as np
7
+ import torch
8
+ import string
9
+ from typing import Optional, Tuple, List, Dict
10
+ import argparse
11
+ from functools import partial
12
+
13
+ import multiprocessing
14
+ from transformers import AutoTokenizer
15
+ from vllm import LLM, SamplingParams
16
+
17
+ from google_search import (
18
+ google_web_search,
19
+ extract_relevant_info,
20
+ fetch_page_content,
21
+ extract_snippet_with_context
22
+ )
23
+ from evaluate import (
24
+ run_evaluation,
25
+ run_evaluation_for_eval,
26
+ extract_answer
27
+ )
28
+ from prompts import (
29
+ get_multiqa_instruction,
30
+ get_math_instruction,
31
+ get_task_instruction_openqa,
32
+ get_task_instruction_math,
33
+ get_webpage_to_reasonchain_instruction
34
+ )
35
+ from functools import partial
36
+ from openai import OpenAI
37
+
38
+ from stage_wise_analysis import stage_wise_analysis
39
+
40
# Define special tokens: control markers the reasoning model emits to request
# a web search, and that this pipeline uses both as generation stop strings
# and to splice search results back into the running prompt.
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
45
+
46
+
47
def parse_args():
    """Build and run the CLI argument parser for the SimpleDeepsearcher run.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    arg_parser = argparse.ArgumentParser(description="Run SimpleDeepsearcer for various datasets.")

    # (flag, keyword-arguments) pairs; registering options from a table keeps
    # the list easy to scan and extend.
    option_table = [
        # Dataset selection
        ('--dataset_name', dict(type=str, required=True, help="Name of the dataset to use.")),
        ('--subset_num', dict(type=int, default=-1, help="Number of examples to process. Defaults to all if not specified.")),
        # Search and document retrieval configuration
        ('--max_search_limit', dict(type=int, default=10, help="Maximum number of searches per question.")),
        ('--max_turn', dict(type=int, default=15, help="Maximum number of turns.")),
        ('--top_k', dict(type=int, default=10, help="Maximum number of search documents to return.")),
        ('--max_doc_len', dict(type=int, default=3000, help="Maximum length of each searched document.")),
        # Model configuration
        ('--model_path', dict(type=str, required=True, help="Path to the reasoning model.")),
        # Sampling parameters
        ('--temperature', dict(type=float, default=0.6, help="Sampling temperature.")),
        ('--top_p', dict(type=float, default=0.95, help="Top-p sampling parameter.")),
        ('--top_k_sampling', dict(type=int, default=40, help="Top-k sampling parameter.")),
        ('--max_tokens', dict(type=int, default=20480, help="Maximum number of tokens to generate.")),
        # Google (Serper) API configuration
        ('--google_subscription_key', dict(type=str, required=True, help="Google Search API subscription key.")),
        ('--google_endpoint', dict(type=str, default="https://google.serper.dev/search", help="Google Search API endpoint.")),
        # Cache / output locations
        ('--cache_dir_base', dict(type=str, required=True, help="cache path.")),
        ('--output_dir_base', dict(type=str, required=True, help="output_dir")),
        ('--is_exclude_urls', dict(action="store_true", help="is_exclude_urls")),
        # Summarization model
        ('--summarization_model_path', dict(type=str, required=True, help="Path to the summary model.")),
        ('--summarization_model_url', dict(type=str, required=True, help="Base url of the summarization model.")),
    ]
    for flag, kwargs in option_table:
        arg_parser.add_argument(flag, **kwargs)

    return arg_parser.parse_args()
183
+
184
+
185
+
186
def webpage_analysis_single(summ_model_url, summ_model_path, prompt) -> str:
    """Send a single summarization prompt to the OpenAI-compatible endpoint.

    Retries up to 10 times, sleeping one second between failed attempts.
    Returns the model reply, or the literal string "None" when every attempt
    fails (callers count "None" as a summarization error).
    """
    client = OpenAI(
        base_url=summ_model_url,
        api_key="EMPTY"
    )
    max_attempts = 10  # max retry 10 times
    for _ in range(max_attempts):
        try:
            reply = client.chat.completions.create(
                model=summ_model_path,
                max_tokens=8192,
                temperature=0.6,
                top_p=0.95,
                messages=[prompt],
            )
        except Exception as exc:
            print(exc)
            time.sleep(1)
            continue
        return reply.choices[0].message.content
    return "None"
206
+
207
def main():
    """Run the multi-turn search-and-reason inference loop end to end.

    Parses CLI args, loads the search/URL caches, the reasoning LLM (vLLM) and
    the dataset, then alternates between generation turns and web searches
    until every sequence finishes or the turn limit is hit, finally writing
    per-turn dumps, evaluation results, and updated caches.
    """
    args = parse_args()
    # Extract arguments
    dataset_name = args.dataset_name
    subset_num = args.subset_num
    MAX_SEARCH_LIMIT = args.max_search_limit
    MAX_TURN = args.max_turn
    top_k = args.top_k
    max_doc_len = args.max_doc_len
    model_path = args.model_path
    summ_model_path = args.summarization_model_path
    summ_model_url = args.summarization_model_url
    temperature = args.temperature
    top_p = args.top_p
    top_k_sampling = args.top_k_sampling
    max_tokens = args.max_tokens
    google_subscription_key = args.google_subscription_key
    google_endpoint = args.google_endpoint
    cache_dir_base = args.cache_dir_base
    output_dir_base = args.output_dir_base
    is_exclude_urls = args.is_exclude_urls

    # NOTE(review): raises KeyError if CUDA_VISIBLE_DEVICES is unset — confirm
    # the launcher always exports it.
    print(f"CUDA_VISIBLE_DEVICES is set to: {os.environ['CUDA_VISIBLE_DEVICES']}")

    # Data paths based on dataset
    data_path = f"./data/{dataset_name}.json"

    print('-----------------------')
    print(f'Using {dataset_name} set.')
    print('-----------------------')

    # ---------------------- Caching Mechanism ----------------------
    # Define cache directories and file paths
    model_name = model_path.split('/')[-1].replace('-instruct', '')  # NOTE(review): computed but unused below
    cache_dir = cache_dir_base
    search_cache_path = os.path.join(cache_dir, 'search_cache.json')
    url_cache_path = os.path.join(cache_dir, 'url_cache.json')

    # Ensure cache directory exists
    os.makedirs(cache_dir, exist_ok=True)

    # Load existing caches or initialize empty dictionaries
    if os.path.exists(search_cache_path):
        try:
            with open(search_cache_path, 'r', encoding='utf-8') as f:
                search_cache = json.load(f)
        except Exception as e:
            # A corrupt cache file is non-fatal: start from an empty cache.
            print(f"load search_cache.json error: {e}")
            search_cache = {}

    else:
        search_cache = {}

    if os.path.exists(url_cache_path):
        try:
            with open(url_cache_path, 'r', encoding='utf-8') as f:
                url_cache = json.load(f)
        except Exception as e:
            print(f"load url_cache.json error: {e}")
            url_cache = {}

    else:
        url_cache = {}

    # Function to save caches
    def save_caches():
        # Persist both in-memory caches back to their JSON files.
        with open(search_cache_path, 'w', encoding='utf-8') as f:
            json.dump(search_cache, f, ensure_ascii=False, indent=2)
        with open(url_cache_path, 'w', encoding='utf-8') as f:
            json.dump(url_cache, f, ensure_ascii=False, indent=2)

    # ---------------------- Reasoning Model Loading ----------------------
    print(f"Loading tokenizer from {model_path}...")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = 'left'
    print("Tokenizer loaded successfully.")

    # Define output directory based on the dataset
    output_dir = os.path.join(output_dir_base, dataset_name)
    os.makedirs(output_dir, exist_ok=True)

    print(f"Loading model from {model_path}...")
    print(f"device_count: {torch.cuda.device_count()}")

    # Initialize the LLM across all visible GPUs.
    llm = LLM(
        model=model_path,
        tensor_parallel_size=torch.cuda.device_count(),
        gpu_memory_utilization=0.95,

    )
    print("Model loaded successfully.")

    # ---------------------- Data Loading ----------------------
    print(f"Loading data from {data_path}...")
    with open(data_path, 'r', encoding='utf-8') as json_file:
        filtered_data = json.load(json_file)
    print(f"Data loaded successfully. Total examples: {len(filtered_data)}")

    # ---------------------- Batch Generation Function ----------------------
    def generate_webpage_to_reasonchain_batch(
        original_questions: List[str],
        prev_reasonings: List[str],
        search_queries: List[str],
        documents: List[str],
        dataset_name: str,
        summ_model_url: str,
        summ_model_path: str,
        batch_output_records: List[Dict],  # New parameter to collect outputs
        coherent: bool = False,  # unused in this body; kept for API compatibility
    ) -> List[str]:
        """Summarize fetched documents into reasoning-chain snippets, in parallel.

        Builds one summarization prompt per (reasoning, query, document) triple,
        fans them out to the summarization endpoint via a process pool, records
        every prompt/raw-output/extraction triple into batch_output_records,
        and returns the extracted infos (one per input).
        """
        user_prompts = [
            get_webpage_to_reasonchain_instruction(r, sq, doc)
            for r, sq, doc in zip(prev_reasonings, search_queries, documents)
        ]


        prompts = [{"role": "user", "content": up} for up in user_prompts]
        print("webpage ana prompts[0]")
        print(prompts[0])

        # Bind the endpoint/model so the pool workers only receive the prompt.
        webpage_analysis_single_to_map = partial(webpage_analysis_single, summ_model_url, summ_model_path)
        with multiprocessing.Pool(processes=50) as pool:
            raw_outputs = list(tqdm(pool.imap(webpage_analysis_single_to_map, prompts), total=len(prompts), desc="generate webpage analyses"))


        # Count the number of summarization errors ("None" is the failure
        # sentinel returned by webpage_analysis_single).
        sum_error = 0
        for output in raw_outputs:
            if output is None or output == "None" or output == "":
                sum_error += 1
        print(f"summarization_error: {sum_error}, ratios: {sum_error / len(raw_outputs)}")

        extracted_infos = [extract_answer(raw, mode='infogen') for raw in raw_outputs]

        for i, (p, r, e) in enumerate(zip(prompts, raw_outputs, extracted_infos)):
            batch_output_records.append({
                'prompt': p,
                'raw_output': r,
                'extracted_info': e
            })

        return extracted_infos

    # ---------------------- Preparation of Input Prompts ----------------------
    input_list = []
    for item in filtered_data:
        question = item['Question']

        if dataset_name in ['aime']:
            instruction = get_multiqa_instruction(MAX_SEARCH_LIMIT)
            user_prompt = get_task_instruction_math(question)

        else:
            instruction = get_multiqa_instruction(MAX_SEARCH_LIMIT)
            user_prompt = get_task_instruction_openqa(question)


        prompt = [{"role": "user", "content": instruction + user_prompt}]
        prompt = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
        input_list.append(prompt)

    if subset_num != -1:
        input_list = input_list[:subset_num]
        filtered_data = filtered_data[:subset_num]

    # Initialize active sequences: one mutable state dict per example,
    # accumulating prompt/output text and per-turn bookkeeping.
    active_sequences = [{
        'item': item,
        'prompt': prompt,
        'output': '',
        'finished': False,
        'history': [],
        'search_count': 0,
        'executed_search_queries': set(),
        'all_info': [],
    } for item, prompt in zip(filtered_data, input_list)]

    # ---------------------- Generation Function ----------------------
    def run_generation(sequences: List[Dict], max_tokens: int) -> List:
        # One vLLM pass over the current prompts; generation stops at either
        # EOS or the end-of-search-query tag (kept in the output so the query
        # can be extracted afterwards).
        prompts = [s['prompt'] for s in sequences]

        sampling_params = SamplingParams(
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k_sampling,
            stop=[END_SEARCH_QUERY, tokenizer.eos_token],
            include_stop_str_in_output=True,
        )
        output_list = llm.generate(prompts, sampling_params=sampling_params)
        print(f"run_generation completed {len(output_list)}")
        return output_list

    # Function to extract text between two tags (last occurrence wins).
    def extract_between(text: str, start_tag: str, end_tag: str) -> Optional[str]:
        pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
        matches = re.findall(pattern, text, flags=re.DOTALL)
        if matches:
            return matches[-1].strip()
        return None

    def replace_recent_steps(origin_str, replace_str):
        """
        Replaces specific steps in the original reasoning steps with new steps.
        If a replacement step contains "DELETE THIS STEP", that step is removed.

        Parameters:
        - origin_str (str): The original reasoning steps.
        - replace_str (str): The steps to replace or delete.

        Returns:
        - str: The updated reasoning steps after applying replacements.
        """

        def parse_steps(text):
            """
            Parses the reasoning steps from a given text.

            Parameters:
            - text (str): The text containing reasoning steps.

            Returns:
            - dict: A dictionary mapping step numbers to their content.
            """
            step_pattern = re.compile(r"Step\s+(\d+):\s*")
            steps = {}
            current_step_num = None
            current_content = []

            for line in text.splitlines():
                step_match = step_pattern.match(line)
                if step_match:
                    # If there's an ongoing step, save its content
                    if current_step_num is not None:
                        steps[current_step_num] = "\n".join(current_content).strip()
                    current_step_num = int(step_match.group(1))
                    content = line[step_match.end():].strip()
                    current_content = [content] if content else []
                else:
                    if current_step_num is not None:
                        current_content.append(line)

            # Save the last step if any
            if current_step_num is not None:
                steps[current_step_num] = "\n".join(current_content).strip()

            return steps

        # Parse the original and replacement steps
        origin_steps = parse_steps(origin_str)
        replace_steps = parse_steps(replace_str)

        # Apply replacements
        for step_num, content in replace_steps.items():
            if "DELETE THIS STEP" in content:
                # Remove the step if it exists
                if step_num in origin_steps:
                    del origin_steps[step_num]
            else:
                # Replace or add the step
                origin_steps[step_num] = content

        # Sort the steps by step number
        sorted_steps = sorted(origin_steps.items())

        # Reconstruct the reasoning steps as a single string
        new_reasoning_steps = "\n\n".join([f"{content}" for num, content in sorted_steps])

        return new_reasoning_steps

    # ---------------------- Initialize Collection Structure ----------------------
    # Initialize a list to collect batch outputs
    batch_output_records = []

    start_time = time.time()
    turn = 0

    # Main loop until all sequences are finished or maximum turns reached
    while True:
        # Identify sequences that need generation
        sequences_needing_generation = [seq for seq in active_sequences if not seq['finished']]

        if sequences_needing_generation:
            turn += 1
            print(f'\n-------------- Turn {turn} --------------')
            print(f"We have {len(sequences_needing_generation)} sequences needing generation...")
            outputs = run_generation(sequences_needing_generation, max_tokens)
            print("Generation completed, processing outputs...")

            # Initialize batch variables
            batch_relevant_info = []
            batch_original_questions = []
            batch_prev_reasonings = []
            batch_search_queries = []
            batch_documents = []
            batch_sequences = []

            # Collect URLs to fetch across all sequences
            all_urls_to_fetch = set()
            url_snippets = {}
            url_sequence_map = {}  # Map URL to list of sequences needing it

            start_search_time = time.time()
            # Process each sequence and collect URLs
            for seq, out in zip(sequences_needing_generation, outputs):
                text = out.outputs[0].text
                seq['history'].append(text)
                # Append generated text to prompt and output
                seq['prompt'] += text
                seq['output'] += text
                seq['all_info'].append({f"turn_{turn}_reason": text})
                # Extract search query
                search_query = extract_between(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)

                # If a search query is present and needs to be executed
                if search_query and seq['output'].rstrip().endswith(END_SEARCH_QUERY):
                    if seq['search_count'] < MAX_SEARCH_LIMIT and search_query not in seq['executed_search_queries']:
                        # Execute search, use cache if available
                        if search_query in search_cache:
                            results = search_cache[search_query]
                            print(f"Using cached search results for query: \"{search_query}\"")
                        else:
                            try:
                                # Optionally exclude the item's own source URLs
                                # (avoids answer leakage during data synthesis).
                                if is_exclude_urls and "urls" in seq["item"]["metadata"]:
                                    print(f"is_exclude_urls: {is_exclude_urls}")
                                    exclude_urls = seq["item"]["metadata"]["urls"]
                                else:
                                    exclude_urls = []

                                print(f"Execute and cache search for query: \"{search_query}\"")
                                results = google_web_search(search_query, google_subscription_key, google_endpoint, market='en-US', language='en', exclude_urls=exclude_urls)  # execute the search
                                search_cache[search_query] = results
                                print(f"Executed and cached search for query: \"{search_query}\"")
                            except Exception as e:
                                print(f"Error during search query '{search_query}': {e}")
                                search_cache[search_query] = {}
                                results = {}

                        # Extract relevant information from Bing search results
                        relevant_info = extract_relevant_info(results)[:top_k]
                        seq['relevant_info'] = relevant_info

                        # Extract URLs and snippets
                        urls_to_fetch = [it['url'] for it in relevant_info]
                        snippets = {info['url']: info['snippet'] for info in relevant_info if 'snippet' in info}

                        # Filter URLs that are not cached
                        urls_to_fetch_filtered = [u for u in urls_to_fetch if u not in url_cache]
                        cached_urls = [u for u in urls_to_fetch if u in url_cache]

                        # Store info for all_urls_to_fetch and url_snippets
                        for url in urls_to_fetch_filtered:
                            all_urls_to_fetch.add(url)
                            url_snippets[url] = snippets.get(url, "")

                        # Re-number the full reasoning so far as "Step N: ..." lines.
                        all_reasoning_steps = seq['output']
                        all_reasoning_steps = all_reasoning_steps.replace('\n\n', '\n').split("\n")

                        truncated_prev_reasoning = ""
                        for i, step in enumerate(all_reasoning_steps):
                            truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"

                        # Keep the first step, the last four, and any search-related
                        # steps; elide the rest with a single "..." marker.
                        prev_steps = truncated_prev_reasoning.split('\n\n')
                        if len(prev_steps) <= 5:
                            truncated_prev_reasoning = '\n\n'.join(prev_steps)
                        else:
                            truncated_prev_reasoning = ''
                            for i, step in enumerate(prev_steps):
                                if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
                                    truncated_prev_reasoning += step + '\n\n'
                                else:
                                    if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
                                        truncated_prev_reasoning += '...\n\n'
                        truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')

                        # Collect parameters for batch processing
                        batch_relevant_info.append(relevant_info)
                        batch_original_questions.append(seq['item']['Question'])
                        batch_prev_reasonings.append(truncated_prev_reasoning)
                        batch_search_queries.append(search_query)
                        batch_sequences.append(seq)

                        # Update search count and executed queries
                        seq['search_count'] += 1
                        seq['executed_search_queries'].add(search_query)

                    elif seq['search_count'] >= MAX_SEARCH_LIMIT:
                        limit_message = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
                        seq['prompt'] += limit_message
                        seq['output'] += limit_message
                        seq['history'].append(limit_message)
                        seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
                        print(f"Search limit reached for query: \"{search_query}\"")

                    elif search_query in seq['executed_search_queries']:
                        limit_message = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
                        seq['prompt'] += limit_message
                        seq['output'] += limit_message
                        seq['history'].append(limit_message)
                        seq["all_info"].append({f"turn_{turn}_search_limited": limit_message})
                        print(f"Repeated search for query: \"{search_query}\"")


                else:
                    # If no search query needs to be executed, mark the sequence as finished
                    seq['finished'] = True
                    print("Sequence marked as complete.")

            print(f"get search time taken: {time.time() - start_search_time}")
            print(f"all_urls_to_fetch len: {len(all_urls_to_fetch)}, url_cache len: {len(url_cache)}")
            print(f"all_urls_to_fetch: {all_urls_to_fetch}")
            # Batch fetch all URLs at once to optimize speed

            if all_urls_to_fetch:
                print(f"Fetching {len(all_urls_to_fetch)} URLs...")
                try:
                    fetched_contents = fetch_page_content(
                        list(all_urls_to_fetch),
                        use_jina=False,
                        jina_api_key=None,
                        # snippets=url_snippets # Do not pass snippets when updating url_cache directly
                    )
                    print(f"Fetched {len(fetched_contents)} URLs successfully.")
                except Exception as e:
                    print(f"Error during batch URL fetching: {e}")
                    fetched_contents = {url: f"Error fetching URL: {e}" for url in all_urls_to_fetch}
                # Update cache with fetched contents
                for url, content in fetched_contents.items():
                    url_cache[url] = content

            # After fetching, prepare formatted documents for batch processing
            for relevant_info in batch_relevant_info:
                formatted_documents = ""
                for i, doc_info in enumerate(relevant_info):
                    url = doc_info['url']
                    raw_context = url_cache.get(url, "")
                    # Strip Bing's bold markers before snippet matching.
                    doc_info['snippet'] = doc_info['snippet'].replace('<b>','').replace('</b>','')
                    success, filtered_context = extract_snippet_with_context(raw_context, doc_info['snippet'], context_chars=max_doc_len)
                    if success:
                        print("extract_snippet_with_context")
                        context = filtered_context
                    else:
                        # Fall back to a prefix of the raw page text.
                        print(f"use raw_webpage_context, {len(raw_context)}")
                        context = raw_context[:max_doc_len*2]

                    doc_info['context'] = context
                    formatted_documents += f"**Web Page {i + 1}:**\n"
                    formatted_documents += json.dumps(doc_info, ensure_ascii=False, indent=2) + "\n"
                print(f'formatted_webpage_documents: {len(formatted_documents)}')
                batch_documents.append(formatted_documents)

            # After fetching, prepare for batch processing if there are any
            if batch_sequences:
                print(f"Batch processing {len(batch_sequences)} sequences with generate_webpage_to_reasonchain_batch...")
                webpage_analyses = generate_webpage_to_reasonchain_batch(
                    original_questions=batch_original_questions,
                    prev_reasonings=batch_prev_reasonings,
                    search_queries=batch_search_queries,
                    documents=batch_documents,
                    dataset_name=dataset_name,
                    summ_model_url=summ_model_url,
                    summ_model_path=summ_model_path,
                    batch_output_records=batch_output_records,  # Pass the collection list
                )
                print("Batch generation completed, assigning outputs to sequences...")

                for seq, analysis,doc in zip(batch_sequences, webpage_analyses, batch_documents):
                    if isinstance(analysis, str):
                        append_text = f"\n\n{BEGIN_SEARCH_RESULT}{analysis}{END_SEARCH_RESULT}\n\n"
                        seq['prompt'] += append_text
                        seq['output'] += append_text
                        seq['history'].append(append_text)
                        seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])
                    else:
                        append_text = replace_recent_steps(seq['output'], analysis)
                        seq['prompt'] += append_text
                        seq['output'] += append_text
                        seq['history'].append(append_text)
                        seq['all_info'].extend([{f"turn_{turn}_search": doc}, {f"turn_{turn}_webpage_analyses": analysis}])

        # Check if all sequences are finished. Dump a JSON-serializable snapshot
        # per turn (note: 'executed_search_queries' is a set and is omitted).
        active_sequences_part = [{
            'item': ele["item"],
            'prompt': ele['prompt'],
            'output': ele["output"],
            'finished': ele["finished"],
            'history':ele["history"],
            'search_count': ele["search_count"],
            'all_info': ele['all_info']
        } for ele in active_sequences]
        with open(os.path.join(output_dir, f"turn_{turn}.json"), 'w', encoding='utf-8') as f:
            json.dump(active_sequences_part, f, ensure_ascii=False, indent=2)
        unfinished = [seq for seq in active_sequences if not seq['finished']]
        if not unfinished:
            break
        else:
            if turn >= MAX_TURN:
                print(f"Maximum number of turns ({MAX_TURN}) reached, stopping.")
                break

    total_time = time.time() - start_time
    print(f"Total time taken: {total_time} seconds")

    # ---------------------- Save Batch Output Records to JSON File ----------------------
    # Define output JSON file path
    t = time.localtime()
    batch_output_file = os.path.join(output_dir, f'test.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.info_extract.json')

    # Save batch_output_records to JSON file
    with open(batch_output_file, 'w', encoding='utf-8') as f:
        json.dump(batch_output_records, f, ensure_ascii=False, indent=2)

    print(f"Batch outputs saved to {batch_output_file}")

    # Prepare output list for evaluation
    output_list = [seq['output'] for seq in active_sequences]

    # Run evaluation
    if dataset_name in ["eval", "gaia"]:
        run_evaluation_for_eval(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, 'test')
    else:
        run_evaluation(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, 'test')

    # ---------------------- Stage-wise Analysis ----------------------
    # Analyze the dump of the last completed turn.
    turn_files = os.listdir(output_dir)
    turn_files = [file for file in turn_files if file.startswith("turn_")]
    max_turn_file = max(turn_files, key=lambda x: int(re.search(r'turn_(\d+)', x).group(1)))

    max_turn_file_path = os.path.join(output_dir, max_turn_file)
    print(f"max_turn_file_path: {max_turn_file_path}")
    stage_wise_analysis(model_path, max_turn_file_path)

    # ---------------------- Update Search and URL Cache ----------------------
    # Merge in entries another concurrent run may have written since startup,
    # then persist the combined caches.
    print('Updating Search and URL Cache...')
    # Load existing caches or initialize empty dictionaries
    if os.path.exists(search_cache_path):
        try:
            with open(search_cache_path, 'r', encoding='utf-8') as f:
                search_cache_new = json.load(f)
        except Exception as e:
            print(f"Error loading search cache: {e}")
            search_cache_new = {}
    else:
        search_cache_new = {}

    if os.path.exists(url_cache_path):
        try:
            with open(url_cache_path, 'r', encoding='utf-8') as f:
                url_cache_new = json.load(f)
        except Exception as e:
            print(f"Error loading url cache: {e}")
            url_cache_new = {}
    else:
        url_cache_new = {}

    search_cache.update(search_cache_new)
    url_cache.update(url_cache_new)

    save_caches()

    print("Process completed.")
772
+
773
# Script entry point: run the full inference pipeline.
if __name__ == "__main__":
    main()
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_main.log ADDED
The diff for this file is too large to render. See raw diff
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_main.sh ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ streamlit run SimpleDeepSearcher_demo_1.py --server.port 7910
2
+
3
+ # ssh -L 7910:localhost:7910 sunshuang@183.174.229.142
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_reasoning.log ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INFO 05-28 08:32:45 api_server.py:116] vLLM API server version 0.6.4
2
+ INFO 05-28 08:32:45 api_server.py:117] args: Namespace(host=None, port=5126, ssl_keyfile=None, ssl_certfile=None, ssl_ca_certs=None, ssl_cert_reqs=0, root_path=None, log_level='debug', model='/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41', task='auto', tokenizer=None, skip_tokenizer_init=False, revision=None, code_revision=None, tokenizer_revision=None, tokenizer_mode='auto', chat_template_text_format='string', trust_remote_code=True, allowed_local_media_path=None, download_dir=None, load_format='auto', config_format=<ConfigFormat.AUTO: 'auto'>, dtype='auto', kv_cache_dtype='auto', quantization_param_path=None, max_model_len=None, guided_decoding_backend='outlines', distributed_executor_backend=None, worker_use_ray=False, pipeline_parallel_size=1, tensor_parallel_size=2, max_parallel_loading_workers=None, ray_workers_use_nsight=False, block_size=16, enable_prefix_caching=False, disable_sliding_window=False, use_v2_block_manager=False, num_lookahead_slots=0, seed=0, swap_space=4, cpu_offload_gb=0, gpu_memory_utilization=0.9, num_gpu_blocks_override=None, max_num_batched_tokens=None, max_num_seqs=256, max_logprobs=20, disable_log_stats=False, quantization=None, rope_scaling=None, rope_theta=None, hf_overrides=None, enforce_eager=False, max_seq_len_to_capture=8192, disable_custom_all_reduce=False, tokenizer_pool_size=0, tokenizer_pool_type='ray', tokenizer_pool_extra_config=None, limit_mm_per_prompt=None, mm_processor_kwargs=None, enable_lora=False, enable_lora_bias=False, max_loras=1, max_lora_rank=16, lora_extra_vocab_size=256, lora_dtype='auto', long_lora_scaling_factors=None, max_cpu_loras=None, fully_sharded_loras=False, enable_prompt_adapter=False, max_prompt_adapters=1, max_prompt_adapter_token=0, device='auto', num_scheduler_steps=1, multi_step_stream_outputs=True, scheduler_delay_factor=0.0, enable_chunked_prefill=None, speculative_model=None, speculative_model_quantization=None, 
num_speculative_tokens=None, speculative_disable_mqa_scorer=False, speculative_draft_tensor_parallel_size=None, speculative_max_model_len=None, speculative_disable_by_batch_size=None, ngram_prompt_lookup_max=None, ngram_prompt_lookup_min=None, spec_decoding_acceptance_method='rejection_sampler', typical_acceptance_sampler_posterior_threshold=None, typical_acceptance_sampler_posterior_alpha=None, disable_logprobs_during_spec_decoding=None, model_loader_extra_config=None, ignore_patterns=[], preemption_mode=None, served_model_name=None, qlora_adapter_name_or_path=None, otlp_traces_endpoint=None, collect_detailed_traces=None, disable_async_output_proc=False, scheduling_policy='fcfs', override_neuron_config=None, override_pooler_config=None, disable_log_requests=False)
3
+ INFO 05-28 08:32:49 config.py:350] This model supports multiple tasks: {'generate', 'embedding'}. Defaulting to 'generate'.
4
+ INFO 05-28 08:32:49 config.py:1020] Defaulting to use mp for distributed inference
5
+ WARNING 05-28 08:32:49 arg_utils.py:1013] Chunked prefill is enabled by default for models with max_model_len > 32K. Currently, chunked prefill might not work with some features or models. If you encounter any issues, please disable chunked prefill by setting --enable-chunked-prefill=False.
6
+ WARNING 05-28 08:32:49 arg_utils.py:1075] [DEPRECATED] Block manager v1 has been removed, and setting --use-v2-block-manager to True or False has no effect on vLLM behavior. Please remove --use-v2-block-manager in your engine argument. If your use case is not supported by SelfAttnBlockSpaceManager (i.e. block manager v2), please file an issue with detailed information.
7
+ INFO 05-28 08:32:49 config.py:1136] Chunked prefill is enabled with max_num_batched_tokens=512.
8
+ INFO 05-28 08:32:49 llm_engine.py:249] Initializing an LLM engine (v0.6.4) with config: model='/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41', speculative_config=None, tokenizer='/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=131072, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, quantization_param_path=None, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='outlines'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=0, served_model_name=/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41, num_scheduler_steps=1, chunked_prefill_enabled=True multi_step_stream_outputs=True, enable_prefix_caching=False, use_async_output_proc=True, use_cached_outputs=False, chat_template_text_format=string, mm_processor_kwargs=None, pooler_config=None)
9
+ INFO 05-28 08:32:50 custom_cache_manager.py:17] Setting Triton cache manager to: vllm.triton_utils.custom_cache_manager:CustomCacheManager
10
+ INFO 05-28 08:32:50 selector.py:135] Using Flash Attention backend.
11
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:32:50 selector.py:135] Using Flash Attention backend.
12
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:32:50 multiproc_worker_utils.py:215] Worker ready; awaiting tasks
13
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:32:51 utils.py:960] Found nccl from library libnccl.so.2
14
+ INFO 05-28 08:32:51 utils.py:960] Found nccl from library libnccl.so.2
15
+ INFO 05-28 08:32:51 pynccl.py:69] vLLM is using nccl==2.21.5
16
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:32:51 pynccl.py:69] vLLM is using nccl==2.21.5
17
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:32:51 custom_all_reduce_utils.py:242] reading GPU P2P access cache from /root/.cache/vllm/gpu_p2p_access_cache_for_0,1.json
18
+ INFO 05-28 08:32:51 custom_all_reduce_utils.py:242] reading GPU P2P access cache from /root/.cache/vllm/gpu_p2p_access_cache_for_0,1.json
19
+ INFO 05-28 08:32:51 shm_broadcast.py:236] vLLM message queue communication handle: Handle(connect_ip='127.0.0.1', local_reader_ranks=[1], buffer=<vllm.distributed.device_communicators.shm_broadcast.ShmRingBuffer object at 0x7f3d811fb460>, local_subscribe_port=39377, remote_subscribe_port=None)
20
+ INFO 05-28 08:32:51 model_runner.py:1072] Starting to load model /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41...
21
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:32:51 model_runner.py:1072] Starting to load model /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41...
22
+
23
+
24
+
25
+
26
+
27
+
28
+
29
+
30
+
31
+
32
+
33
+
34
+
35
+
36
+
37
+
38
+
39
+ INFO 05-28 08:33:13 model_runner.py:1077] Loading model weights took 30.7292 GB
40
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:33:13 model_runner.py:1077] Loading model weights took 30.7292 GB
41
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:33:14 worker.py:232] Memory profiling results: total_gpu_memory=79.32GiB initial_memory_usage=32.14GiB peak_torch_memory=30.83GiB memory_usage_post_profile=32.88Gib non_torch_memory=2.11GiB kv_cache_size=38.44GiB gpu_memory_utilization=0.90
42
+ INFO 05-28 08:33:14 worker.py:232] Memory profiling results: total_gpu_memory=79.32GiB initial_memory_usage=32.14GiB peak_torch_memory=32.15GiB memory_usage_post_profile=33.13Gib non_torch_memory=2.36GiB kv_cache_size=36.87GiB gpu_memory_utilization=0.90
43
+ INFO 05-28 08:33:14 distributed_gpu_executor.py:57] # GPU blocks: 18877, # CPU blocks: 2048
44
+ INFO 05-28 08:33:14 distributed_gpu_executor.py:61] Maximum concurrency for 131072 tokens per request: 2.30x
45
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:33:16 model_runner.py:1400] Capturing cudagraphs for decoding. This may lead to unexpected consequences if the model is not static. To run the model in eager mode, set 'enforce_eager=True' or use '--enforce-eager' in the CLI.
46
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:33:16 model_runner.py:1404] If out-of-memory error occurs during cudagraph capture, consider decreasing `gpu_memory_utilization` or switching to eager mode. You can also reduce the `max_num_seqs` as needed to decrease memory usage.
47
+ INFO 05-28 08:33:16 model_runner.py:1400] Capturing cudagraphs for decoding. This may lead to unexpected consequences if the model is not static. To run the model in eager mode, set 'enforce_eager=True' or use '--enforce-eager' in the CLI.
48
+ INFO 05-28 08:33:16 model_runner.py:1404] If out-of-memory error occurs during cudagraph capture, consider decreasing `gpu_memory_utilization` or switching to eager mode. You can also reduce the `max_num_seqs` as needed to decrease memory usage.
49
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:33:27 custom_all_reduce.py:224] Registering 4515 cuda graph addresses
50
+ INFO 05-28 08:33:27 custom_all_reduce.py:224] Registering 4515 cuda graph addresses
51
+ (VllmWorkerProcess pid=7046) INFO 05-28 08:33:27 model_runner.py:1518] Graph capturing finished in 11 secs, took 0.38 GiB
52
+ INFO 05-28 08:33:27 model_runner.py:1518] Graph capturing finished in 11 secs, took 0.39 GiB
53
+ INFO 05-28 08:33:27 launcher.py:19] Available routes are:
54
+ INFO 05-28 08:33:27 launcher.py:27] Route: /openapi.json, Methods: GET, HEAD
55
+ INFO 05-28 08:33:27 launcher.py:27] Route: /docs, Methods: GET, HEAD
56
+ INFO 05-28 08:33:27 launcher.py:27] Route: /docs/oauth2-redirect, Methods: GET, HEAD
57
+ INFO 05-28 08:33:27 launcher.py:27] Route: /redoc, Methods: GET, HEAD
58
+ INFO 05-28 08:33:27 launcher.py:27] Route: /health, Methods: GET
59
+ INFO 05-28 08:33:27 launcher.py:27] Route: /generate, Methods: POST
60
+ INFO: Started server process [6677]
61
+ INFO: Waiting for application startup.
62
+ INFO: Application startup complete.
63
+ INFO: Uvicorn running on http://0.0.0.0:5126 (Press CTRL+C to quit)
64
+ INFO: 127.0.0.1:47818 - "POST /generate HTTP/1.1" 200 OK
65
+ INFO 05-28 08:35:16 async_llm_engine.py:208] Added request 7a9a2ee18fb446afbd060d51c224190e.
66
+ INFO 05-28 08:35:16 metrics.py:449] Avg prompt throughput: 2.4 tokens/s, Avg generation throughput: 0.0 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.1%, CPU KV cache usage: 0.0%.
67
+ INFO 05-28 08:35:18 async_llm_engine.py:176] Finished request 7a9a2ee18fb446afbd060d51c224190e.
68
+ INFO: 127.0.0.1:32768 - "POST /generate HTTP/1.1" 200 OK
69
+ INFO 05-28 08:35:43 async_llm_engine.py:208] Added request 0274d63783a9424895ebf3967037eea6.
70
+ INFO 05-28 08:35:43 metrics.py:449] Avg prompt throughput: 18.7 tokens/s, Avg generation throughput: 4.5 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.2%, CPU KV cache usage: 0.0%.
71
+ INFO 05-28 08:35:45 async_llm_engine.py:176] Finished request 0274d63783a9424895ebf3967037eea6.
72
+ INFO: 127.0.0.1:34688 - "POST /generate HTTP/1.1" 200 OK
73
+ INFO 05-28 08:36:18 async_llm_engine.py:208] Added request e91a7ffc50e64c208a8562539904b167.
74
+ INFO 05-28 08:36:18 metrics.py:449] Avg prompt throughput: 11.5 tokens/s, Avg generation throughput: 3.2 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.1%, CPU KV cache usage: 0.0%.
75
+ INFO 05-28 08:36:23 async_llm_engine.py:176] Finished request e91a7ffc50e64c208a8562539904b167.
76
+ INFO: 127.0.0.1:46322 - "POST /generate HTTP/1.1" 200 OK
77
+ INFO 05-28 08:36:46 async_llm_engine.py:208] Added request 2079f99c40994405b88a84c49074e96a.
78
+ INFO 05-28 08:36:46 metrics.py:449] Avg prompt throughput: 11.0 tokens/s, Avg generation throughput: 9.2 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.1%, CPU KV cache usage: 0.0%.
79
+ INFO 05-28 08:36:51 async_llm_engine.py:176] Finished request 2079f99c40994405b88a84c49074e96a.
80
+ INFO: 127.0.0.1:37476 - "POST /generate HTTP/1.1" 200 OK
81
+ INFO 05-28 08:36:59 async_llm_engine.py:208] Added request c35e0822295247489b4b19365703d8be.
82
+ INFO 05-28 08:36:59 metrics.py:449] Avg prompt throughput: 25.1 tokens/s, Avg generation throughput: 18.9 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.1%, CPU KV cache usage: 0.0%.
83
+ INFO 05-28 08:37:01 async_llm_engine.py:176] Finished request c35e0822295247489b4b19365703d8be.
84
+ INFO: 127.0.0.1:43218 - "POST /generate HTTP/1.1" 200 OK
85
+ INFO 05-28 08:37:18 async_llm_engine.py:208] Added request 007a4fe3f4bc41eeaebe4edfbb4d698a.
86
+ INFO 05-28 08:37:18 metrics.py:449] Avg prompt throughput: 27.0 tokens/s, Avg generation throughput: 5.3 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.2%, CPU KV cache usage: 0.0%.
87
+ INFO 05-28 08:37:20 async_llm_engine.py:176] Finished request 007a4fe3f4bc41eeaebe4edfbb4d698a.
88
+ INFO: 127.0.0.1:33394 - "POST /generate HTTP/1.1" 200 OK
89
+ INFO 05-28 09:11:47 async_llm_engine.py:208] Added request 6e201846b67c4419933e05e2f670de1c.
90
+ INFO 05-28 09:11:47 metrics.py:449] Avg prompt throughput: 0.2 tokens/s, Avg generation throughput: 0.0 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.1%, CPU KV cache usage: 0.0%.
91
+ INFO 05-28 09:11:52 metrics.py:449] Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 58.1 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.2%, CPU KV cache usage: 0.0%.
92
+ INFO 05-28 09:11:52 async_llm_engine.py:176] Finished request 6e201846b67c4419933e05e2f670de1c.
93
+ INFO: 127.0.0.1:38640 - "POST /generate HTTP/1.1" 200 OK
94
+ INFO 05-28 09:12:08 async_llm_engine.py:208] Added request 920b955f49c74fe48b8f2442bbd29176.
95
+ INFO 05-28 09:12:08 metrics.py:449] Avg prompt throughput: 32.5 tokens/s, Avg generation throughput: 1.1 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.2%, CPU KV cache usage: 0.0%.
96
+ INFO 05-28 09:12:10 async_llm_engine.py:176] Finished request 920b955f49c74fe48b8f2442bbd29176.
97
+ INFO: 127.0.0.1:57750 - "POST /generate HTTP/1.1" 200 OK
98
+ INFO 05-28 09:12:36 async_llm_engine.py:208] Added request f1a574e3675e4d659f7264c0930c9f28.
99
+ INFO 05-28 09:12:36 metrics.py:449] Avg prompt throughput: 26.3 tokens/s, Avg generation throughput: 5.3 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.4%, CPU KV cache usage: 0.0%.
100
+ INFO 05-28 09:12:38 async_llm_engine.py:176] Finished request f1a574e3675e4d659f7264c0930c9f28.
101
+ INFO: 127.0.0.1:33516 - "POST /generate HTTP/1.1" 200 OK
102
+ INFO 05-28 09:13:06 async_llm_engine.py:208] Added request dbbd72486eef4c78b2a1955745e5754d.
103
+ INFO 05-28 09:13:06 metrics.py:449] Avg prompt throughput: 40.4 tokens/s, Avg generation throughput: 4.5 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.5%, CPU KV cache usage: 0.0%.
104
+ INFO 05-28 09:13:08 async_llm_engine.py:176] Finished request dbbd72486eef4c78b2a1955745e5754d.
105
+ INFO 05-28 10:08:05 launcher.py:57] Shutting down FastAPI HTTP server.
106
+ INFO: Shutting down
107
+ INFO: Waiting for application shutdown.
108
+ INFO: Application shutdown complete.
109
+ INFO 05-28 10:08:05 async_llm_engine.py:62] Engine is gracefully shutting down.
110
+ INFO 05-28 10:08:06 multiproc_worker_utils.py:120] Killing local vLLM worker processes
111
+ [rank0]:[W528 10:08:06.073770299 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present, but this warning has only been added since PyTorch 2.4 (function operator())
112
+ /opt/aps/python/lib/python3.10/multiprocessing/resource_tracker.py:224: UserWarning: resource_tracker: There appear to be 1 leaked shared_memory objects to clean up at shutdown
113
+ warnings.warn('resource_tracker: There appear to be %d '
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_reasoning.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ export CUDA_VISIBLE_DEVICES=0,1
2
+ export MODEL_PATH=/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41
3
+ python -m vllm.entrypoints.api_server \
4
+ --port 5126 \
5
+ --model $MODEL_PATH \
6
+ --tensor-parallel-size 2 \
7
+ --trust-remote-code
8
+ # ssh -R 5129:localhost:5129 -NT -o ServerAliveInterval=60 aibox-142
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_summ.log ADDED
The diff for this file is too large to render. See raw diff
 
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/launch_summ.sh ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # export CUDA_VISIBLE_DEVICES=0,1
3
+ # nohup vllm serve /capacity/userdata/models/QwQ-32B \
4
+ # --tensor-parallel-size=2 \
5
+ # --gpu-memory-utilization 0.95 \
6
+ # --port 8003 > /opt/aps/workdir/sunshuang/deep_search/search_o1/sft_logs/vllm_serve_qwq_1.log 2>&1 &
7
+ export CUDA_VISIBLE_DEVICES=2,3
8
+ vllm serve /capacity/userdata/models/QwQ-32B \
9
+ --tensor-parallel-size=2 \
10
+ --gpu-memory-utilization 0.95 \
11
+ --port 8003
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/lcb_runner/benchmarks/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from lcb_runner.benchmarks.code_generation import (
2
+ CodeGenerationProblem,
3
+ load_code_generation_dataset,
4
+ load_code_generation_dataset_not_fast,
5
+ )
6
+ from lcb_runner.benchmarks.test_output_prediction import (
7
+ TestOutputPredictionProblem,
8
+ load_test_prediction_dataset,
9
+ )
10
+ from lcb_runner.benchmarks.code_execution import (
11
+ CodeExecutionProblem,
12
+ load_code_execution_dataset,
13
+ )
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/lcb_runner/benchmarks/code_execution.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from enum import Enum
3
+ from datetime import datetime
4
+ from dataclasses import dataclass
5
+
6
+ from datasets import load_dataset
7
+
8
+
9
+ @dataclass
10
+ class CodeExecutionProblem:
11
+ question_id: str
12
+ contest_id: str
13
+ contest_date: datetime
14
+ difficulty: str
15
+ function_name: str
16
+ code: str
17
+ input: str
18
+ output: str
19
+ id: str
20
+ problem_id: str
21
+ numsteps: int
22
+
23
+ def __post_init__(self):
24
+ pass
25
+
26
+ def insert_output(self, output_list: list[str], pred_list: list[str]) -> dict:
27
+ return {
28
+ "question_id": self.question_id,
29
+ "contest_id": self.contest_id,
30
+ "contest_date": self.contest_date.isoformat(),
31
+ "difficulty": self.difficulty,
32
+ "function_name": self.function_name,
33
+ "code": self.code,
34
+ "input": self.input,
35
+ "output": self.output,
36
+ "id": self.id,
37
+ "problem_id": self.problem_id,
38
+ "numsteps": self.numsteps,
39
+ "output_list": output_list,
40
+ "pred_list": pred_list,
41
+ }
42
+
43
+ def insert_output_evaluation(
44
+ self, output_list: list[str], code_list: list[str], graded_list: list[bool]
45
+ ) -> dict:
46
+ output = self.insert_output(output_list, code_list)
47
+ output["graded_list"] = graded_list
48
+ output["pass@1"] = graded_list.count(True) / len(graded_list)
49
+ return output
50
+
51
+ def get_evaluation_sample(self) -> dict:
52
+ return {
53
+ "code": self.code,
54
+ "input": self.input,
55
+ "output": self.output,
56
+ }
57
+
58
+
59
+ def load_code_execution_dataset(release_version="release_v1") -> list[CodeExecutionProblem]:
60
+ dataset = load_dataset("livecodebench/execution-v2", split="test")
61
+ dataset = [CodeExecutionProblem(**p) for p in dataset] # type: ignore
62
+ print(f"Loaded {len(dataset)} problems")
63
+ return dataset
64
+
65
+
66
+ if __name__ == "__main__":
67
+ dataset = load_code_execution_dataset()
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/lcb_runner/benchmarks/code_generation.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import zlib
3
+ import pickle
4
+ import base64
5
+ from enum import Enum
6
+ from datetime import datetime
7
+ from dataclasses import dataclass
8
+
9
+ from datasets import load_dataset
10
+
11
+
12
+ class Platform(Enum):
13
+ LEETCODE = "leetcode"
14
+ CODEFORCES = "codeforces"
15
+ ATCODER = "atcoder"
16
+
17
+
18
+ class Difficulty(Enum):
19
+ EASY = "easy"
20
+ MEDIUM = "medium"
21
+ HARD = "hard"
22
+
23
+
24
+ class TestType(Enum):
25
+ STDIN = "stdin"
26
+ FUNCTIONAL = "functional"
27
+
28
+
29
+ @dataclass
30
+ class Test:
31
+ input: str
32
+ output: str
33
+ testtype: TestType
34
+
35
+ def __post_init__(self):
36
+ self.testtype = TestType(self.testtype)
37
+ # if self.testtype == TestType.FUNCTIONAL:
38
+ # self.input = json.loads(self.input)
39
+ # self.output = json.loads(self.output)
40
+
41
+
42
+ @dataclass
43
+ class CodeGenerationProblem:
44
+ question_title: str
45
+ question_content: str
46
+ platform: Platform
47
+ question_id: str
48
+ contest_id: str
49
+ contest_date: datetime
50
+ starter_code: str
51
+ difficulty: Difficulty
52
+ public_test_cases: list[Test]
53
+ private_test_cases: list[Test]
54
+ metadata: dict
55
+
56
+ def __post_init__(self):
57
+ self.platform = Platform(self.platform)
58
+ self.difficulty = Difficulty(self.difficulty)
59
+ self.contest_date = datetime.fromisoformat(self.contest_date)
60
+
61
+ self.public_test_cases = json.loads(self.public_test_cases) # type: ignore
62
+ self.public_test_cases = [Test(**t) for t in self.public_test_cases]
63
+
64
+ try:
65
+ self.private_test_cases = json.loads(self.private_test_cases) # type: ignore
66
+ except:
67
+ self.private_test_cases = json.loads(
68
+ pickle.loads(
69
+ zlib.decompress(
70
+ base64.b64decode(self.private_test_cases.encode("utf-8")) # type: ignore
71
+ )
72
+ )
73
+ ) # type: ignore
74
+ self.private_test_cases = [Test(**t) for t in self.private_test_cases]
75
+
76
+ self.metadata = json.loads(self.metadata) # type: ignore
77
+
78
+ def insert_output(self, output_list: list[str], code_list: list[str]) -> dict:
79
+ return {
80
+ "question_title": self.question_title,
81
+ "question_content": self.question_content,
82
+ "platform": self.platform.value,
83
+ "question_id": self.question_id,
84
+ "contest_id": self.contest_id,
85
+ "contest_date": self.contest_date.isoformat(),
86
+ "starter_code": self.starter_code,
87
+ "difficulty": self.difficulty.value,
88
+ "output_list": output_list,
89
+ "code_list": code_list,
90
+ }
91
+
92
+ def insert_output_evaluation(
93
+ self,
94
+ output_list: list[str],
95
+ code_list: list[str],
96
+ graded_list: list[bool],
97
+ **kwargs,
98
+ ) -> dict:
99
+ output = self.insert_output(output_list, code_list)
100
+ output["graded_list"] = graded_list
101
+ output["pass@1"] = graded_list.count(True) / len(graded_list)
102
+ for k, v in kwargs.items():
103
+ output[k] = v
104
+ return output
105
+
106
+ def get_evaluation_sample(self):
107
+ return {
108
+ "input_output": json.dumps(
109
+ {
110
+ "inputs": [
111
+ t.input
112
+ for t in self.public_test_cases + self.private_test_cases
113
+ ],
114
+ "outputs": [
115
+ t.output
116
+ for t in self.public_test_cases + self.private_test_cases
117
+ ],
118
+ "fn_name": self.metadata.get("func_name", None),
119
+ }
120
+ ),
121
+ }
122
+
123
+
124
+ def load_code_generation_dataset(release_version="release_v1") -> list[CodeGenerationProblem]:
125
+ dataset = load_dataset("livecodebench/code_generation_lite", split="test", version_tag=release_version, trust_remote_code=True)
126
+ dataset = [CodeGenerationProblem(**p) for p in dataset] # type: ignore
127
+ print(f"Loaded {len(dataset)} problems")
128
+ return dataset
129
+
130
+
131
+ def load_code_generation_dataset_not_fast(release_version="release_v1") -> list[CodeGenerationProblem]:
132
+ dataset = load_dataset("livecodebench/code_generation", split="test")
133
+ dataset = [CodeGenerationProblem(**p) for p in dataset] # type: ignore
134
+ print(f"Loaded {len(dataset)} problems")
135
+ return dataset
136
+
137
+
138
+ if __name__ == "__main__":
139
+ dataset = load_code_generation_dataset()
deep_search/search_o1/scripts/infer_github/SimpleDeepSearcher_new/inference/lcb_runner/benchmarks/test_output_prediction.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from enum import Enum
3
+ from datetime import datetime
4
+ from dataclasses import dataclass
5
+
6
+ from datasets import load_dataset
7
+
8
+
9
+ @dataclass
10
+ class Test:
11
+ input: str
12
+ output: str
13
+ testtype: str
14
+
15
+
16
+ @dataclass
17
+ class TestOutputPredictionProblem:
18
+ question_title: str
19
+ question_content: str
20
+ question_id: str
21
+ contest_id: str
22
+ contest_date: datetime
23
+ difficulty: str
24
+ test: list[Test]
25
+ starter_code: str
26
+ function_name: str
27
+ test_id: int
28
+
29
+ def __post_init__(self):
30
+ self.test = [Test(**t) for t in json.loads(self.test)] # type: ignore
31
+
32
+ def insert_output(self, output_list: list[str], pred_list: list[str]) -> dict:
33
+ return {
34
+ "question_title": self.question_title,
35
+ "question_content": self.question_content,
36
+ "question_id": self.question_id,
37
+ "contest_id": self.contest_id,
38
+ "contest_date": self.contest_date.isoformat(),
39
+ "difficulty": self.difficulty,
40
+ "output_list": output_list,
41
+ "pred_list": pred_list,
42
+ "test_id": self.test_id,
43
+ "function_name": self.function_name,
44
+ "starter_code": self.starter_code,
45
+ }
46
+
47
+ def insert_output_evaluation(
48
+ self, output_list: list[str], code_list: list[str], graded_list: list[bool]
49
+ ) -> dict:
50
+ output = self.insert_output(output_list, code_list)
51
+ output["graded_list"] = graded_list
52
+ output["pass@1"] = graded_list.count(True) / len(graded_list)
53
+ return output
54
+
55
+ def get_evaluation_sample(self) -> dict:
56
+ return {
57
+ "input": self.question_content,
58
+ "output": self.test[0].output,
59
+ }
60
+
61
+
62
+ def load_test_prediction_dataset(release_version="release_v1") -> list[TestOutputPredictionProblem]:
63
+ dataset = load_dataset("livecodebench/test_generation", split="test") # type: ignore
64
+ dataset = [TestOutputPredictionProblem(**d) for d in dataset]
65
+ print(f"Loaded {len(dataset)} prediction problems")
66
+ return dataset
67
+
68
+
69
+ if __name__ == "__main__":
70
+ dataset = load_test_prediction_dataset()