Add files using upload-large-folder tool
Browse files- deep_search/sft/3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B.log +42 -0
- deep_search/sft/3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B_1.log +35 -0
- deep_search/sft/3-28_merged_1174_zh_296_sft_1469_doc_by_itself_QwQ-32B.log +109 -0
- deep_search/sft/4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B.log +40 -0
- deep_search/sft/4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B_1.log +81 -0
- deep_search/sft/4-1_no_error_data_871_doc_by_itself_QwQ-32B.log +78 -0
- deep_search/sft/4-24__doc_by_itself__add_math871.log +4 -0
- deep_search/sft/4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_qwen7b_sft_871_checkpoint-78_add_math871_after_search.log +44 -0
- deep_search/sft/4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_qwen7b_sft_871_checkpoint-78_add_math871_after_search_1.log +36 -0
- deep_search/sft/4-24_no_error_data_871_doc_by_itself_DeepSeek-R1-Distill-Qwen-7B.log +198 -0
- deep_search/sft/4-24_no_error_data_871_doc_by_itself_Qwen2.5-7B-Instruct_add_math871_1.log +27 -0
- deep_search/sft/4-24_no_error_data_871_doc_by_itself_Qwen2.5-7B-Instruct_add_math871_2.log +97 -0
- deep_search/sft/4-25_no_error_data_871_doc_by_itself_QwQ-32B_1.log +746 -0
- deep_search/sft/4-4_no_error_data_871_doc_by_itself_DeepSeek-R1-Distill-Qwen-32.log +115 -0
- deep_search/sft/5-5_ablation_ques_domain_filtered_data_738_Qwen2.5-7B-Instruct.log +128 -0
- deep_search/sft/5-5_ablation_ques_keywords_filtered_data_727_Qwen2.5-7B-Instruct.log +128 -0
- deep_search/sft/5-5_ablation_ques_yiwenci_filtered_data_811_Qwen2.5-7B-Instruct.log +173 -0
- deep_search/sft/5-5_ablation_resp_format_1064_random_sample_871_Qwen2.5-7B-Instruct.log +72 -0
- deep_search/sft/ds_zero3_offload.json +39 -0
- deep_search/sft/gen_data_2.log +4 -0
- deep_search/sft/hostfile +2 -0
- deep_search/sft/mix.sh +265 -0
- deep_search/sft/mix_2.sh +295 -0
- deep_search/sft/mix_2_1.sh +174 -0
- deep_search/sft/mix_math.sh +62 -0
- deep_search/sft/mix_math_after_search.sh +61 -0
- deep_search/sft/mix_math_multi_node_1.sh +66 -0
- deep_search/sft/mix_math_sht_new_prompt.sh +62 -0
- deep_search/sft/mix_wo_mask.sh +62 -0
- deep_search/sft/run.sh +25 -0
- deep_search/sft/sft.py +204 -0
- deep_search/sft/sft_2_math_after_search.py +260 -0
- deep_search/sft/sft_2_wo_mask.py +261 -0
- deep_search/sft/test.ipynb +58 -0
- deep_search/sft/test_len.sh +25 -0
- deep_search/sft/test_two_model_qwq.sh +64 -0
- deep_search/sft/test_two_model_qwq_1.sh +69 -0
- deep_search/sft/train-inst.py +311 -0
- deep_search/sft/train-inst.sh +56 -0
- deep_search/sft/train_env.yml +145 -0
- deep_search/sft/train_requirements.txt +116 -0
- deep_search/sft/wait_eval_1.py +167 -0
- deep_search/sft/wait_eval_use_one_model_for_ckpt.py +178 -0
- deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py +178 -0
- deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_dpsk.py +178 -0
- deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_qwq.py +178 -0
- deep_search/sft/wait_eval_use_one_model_for_ckpt_worker0.py +180 -0
- ssh_node.sh +5 -0
- test.py +58 -0
- train_requirements.txt +122 -0
deep_search/sft/3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [168, 143, 115, 86]
|
| 3 |
+
step_list: [168, 143, 115, 86]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-168) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-168 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/eval/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-168 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/realqa/inf.log 2>&1 &
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval/inf.log 2>&1 &
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/realqa/inf.log 2>&1 &
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval/inf.log 2>&1 &
|
| 19 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa/inf.log 2>&1 &
|
| 20 |
+
All checkpoints exist. Wait for runing...
|
| 21 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-168 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/eval/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-168 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/realqa/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval/inf.log 2>&1 &
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/realqa/inf.log 2>&1 &
|
| 30 |
+
available_gpus: [4, 5]
|
| 31 |
+
The following command is about to run:
|
| 32 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval/inf.log 2>&1 &
|
| 33 |
+
available_gpus: [4, 5]
|
| 34 |
+
The following command is about to run:
|
| 35 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa/inf.log 2>&1 &
|
| 36 |
+
available_gpus: [4, 5]
|
| 37 |
+
The following command is about to run:
|
| 38 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval/inf.log 2>&1 &
|
| 39 |
+
available_gpus: [4, 5]
|
| 40 |
+
The following command is about to run:
|
| 41 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa/inf.log 2>&1 &
|
| 42 |
+
Wish me good luck!
|
deep_search/sft/3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B_1.log
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [168, 143, 115, 86]
|
| 3 |
+
step_list: [168, 143, 115, 86]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-168) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/eval
|
| 7 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/realqa
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval/inf.log 2>&1 &
|
| 11 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/realqa
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval/inf.log 2>&1 &
|
| 19 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa/inf.log 2>&1 &
|
| 20 |
+
All checkpoints exist. Wait for runing...
|
| 21 |
+
available_gpus: [2, 3, 4, 6, 7]
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,6 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval/inf.log 2>&1 &
|
| 26 |
+
available_gpus: [4, 6, 7]
|
| 27 |
+
The following command is about to run:
|
| 28 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,6 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa/inf.log 2>&1 &
|
| 29 |
+
available_gpus: [4, 6, 7]
|
| 30 |
+
The following command is about to run:
|
| 31 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,6 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval/inf.log 2>&1 &
|
| 32 |
+
available_gpus: [4, 6, 7]
|
| 33 |
+
The following command is about to run:
|
| 34 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,6 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa/inf.log 2>&1 &
|
| 35 |
+
Wish me good luck!
|
deep_search/sft/3-28_merged_1174_zh_296_sft_1469_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [138, 115, 92, 69]
|
| 3 |
+
step_list: [138, 115, 92, 69]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-138) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,2 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/138/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-138 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/138/eval/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/138/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-138 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/138/realqa/inf.log 2>&1 &
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-115) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/115/eval/inf.log 2>&1 &
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/115/realqa/inf.log 2>&1 &
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-92) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/92/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-92 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/92/eval/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/92/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-92 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/92/realqa/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-69) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/69/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/69/eval/inf.log 2>&1 &
|
| 19 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/69/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/69/realqa/inf.log 2>&1 &
|
| 20 |
+
All checkpoints exist. Wait for runing...
|
| 21 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/138/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-138 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/138/eval/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/138/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-138 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/138/realqa/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/115/eval/inf.log 2>&1 &
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/115/realqa/inf.log 2>&1 &
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: [1]
|
| 32 |
+
available_gpus: [1]
|
| 33 |
+
available_gpus: [1]
|
| 34 |
+
available_gpus: [1]
|
| 35 |
+
available_gpus: [1]
|
| 36 |
+
available_gpus: [1]
|
| 37 |
+
available_gpus: [1]
|
| 38 |
+
available_gpus: [1]
|
| 39 |
+
available_gpus: [1]
|
| 40 |
+
available_gpus: [1]
|
| 41 |
+
available_gpus: [1]
|
| 42 |
+
available_gpus: [1]
|
| 43 |
+
available_gpus: [1]
|
| 44 |
+
available_gpus: [1]
|
| 45 |
+
available_gpus: [1]
|
| 46 |
+
available_gpus: [1]
|
| 47 |
+
available_gpus: [1]
|
| 48 |
+
available_gpus: [1]
|
| 49 |
+
available_gpus: [1]
|
| 50 |
+
available_gpus: [1]
|
| 51 |
+
available_gpus: [1]
|
| 52 |
+
available_gpus: [1]
|
| 53 |
+
available_gpus: [1]
|
| 54 |
+
available_gpus: [1]
|
| 55 |
+
available_gpus: [1]
|
| 56 |
+
available_gpus: [1]
|
| 57 |
+
available_gpus: [1]
|
| 58 |
+
available_gpus: [1]
|
| 59 |
+
available_gpus: [1]
|
| 60 |
+
available_gpus: [1]
|
| 61 |
+
available_gpus: [1]
|
| 62 |
+
available_gpus: [1]
|
| 63 |
+
available_gpus: [1]
|
| 64 |
+
available_gpus: [1]
|
| 65 |
+
available_gpus: [1]
|
| 66 |
+
available_gpus: [1]
|
| 67 |
+
available_gpus: [1]
|
| 68 |
+
available_gpus: [1]
|
| 69 |
+
available_gpus: [1]
|
| 70 |
+
available_gpus: [1]
|
| 71 |
+
available_gpus: [1]
|
| 72 |
+
available_gpus: [1]
|
| 73 |
+
available_gpus: [1, 4, 5]
|
| 74 |
+
The following command is about to run:
|
| 75 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=1,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/92/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-92 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/92/eval/inf.log 2>&1 &
|
| 76 |
+
available_gpus: [1, 4, 5]
|
| 77 |
+
The following command is about to run:
|
| 78 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=1,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/92/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-92 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/92/realqa/inf.log 2>&1 &
|
| 79 |
+
available_gpus: [5]
|
| 80 |
+
available_gpus: [5]
|
| 81 |
+
available_gpus: [5]
|
| 82 |
+
available_gpus: [5]
|
| 83 |
+
available_gpus: [5]
|
| 84 |
+
available_gpus: [5]
|
| 85 |
+
available_gpus: [5]
|
| 86 |
+
available_gpus: [5]
|
| 87 |
+
available_gpus: [5]
|
| 88 |
+
available_gpus: [5]
|
| 89 |
+
available_gpus: [5]
|
| 90 |
+
available_gpus: [5]
|
| 91 |
+
available_gpus: [5]
|
| 92 |
+
available_gpus: [5]
|
| 93 |
+
available_gpus: [5]
|
| 94 |
+
available_gpus: [5]
|
| 95 |
+
available_gpus: [5]
|
| 96 |
+
available_gpus: [5]
|
| 97 |
+
available_gpus: [5]
|
| 98 |
+
available_gpus: [5]
|
| 99 |
+
available_gpus: [5]
|
| 100 |
+
available_gpus: [5]
|
| 101 |
+
available_gpus: [5]
|
| 102 |
+
available_gpus: [0, 2, 5]
|
| 103 |
+
The following command is about to run:
|
| 104 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,2 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/69/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/69/eval/inf.log 2>&1 &
|
| 105 |
+
available_gpus: [5]
|
| 106 |
+
available_gpus: [5, 6, 7]
|
| 107 |
+
The following command is about to run:
|
| 108 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,6 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/69/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469/69/realqa/inf.log 2>&1 &
|
| 109 |
+
Wish me good luck!
|
deep_search/sft/4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [102, 86, 69, 51]
|
| 2 |
+
step_list: [102, 86, 69, 51]
|
| 3 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/realqa/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/realqa/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/realqa/inf.log 2>&1 &
|
| 15 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51) to exist...
|
| 16 |
+
The checkpoint exists. Waiting for running...
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/eval/inf.log 2>&1 &
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/realqa/inf.log 2>&1 &
|
| 19 |
+
All checkpoints exist. Wait for runing...
|
| 20 |
+
available_gpus: [0, 1, 2, 4, 5, 7]
|
| 21 |
+
The following command is about to run:
|
| 22 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/eval/inf.log 2>&1 &
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/realqa/inf.log 2>&1 &
|
| 25 |
+
The following command is about to run:
|
| 26 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/eval/inf.log 2>&1 &
|
| 27 |
+
available_gpus: [0, 1, 2, 4, 5, 7]
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/realqa/inf.log 2>&1 &
|
| 30 |
+
The following command is about to run:
|
| 31 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/eval/inf.log 2>&1 &
|
| 32 |
+
The following command is about to run:
|
| 33 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/realqa/inf.log 2>&1 &
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: [0, 1, 2, 4]
|
| 36 |
+
The following command is about to run:
|
| 37 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/eval/inf.log 2>&1 &
|
| 38 |
+
The following command is about to run:
|
| 39 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/realqa/inf.log 2>&1 &
|
| 40 |
+
Wish me good luck!
|
deep_search/sft/4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B_1.log
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [102, 86, 69, 51]
|
| 3 |
+
step_list: [102, 86, 69, 51]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/eval/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/realqa/inf.log 2>&1 &
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/eval/inf.log 2>&1 &
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/realqa/inf.log 2>&1 &
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/eval/inf.log 2>&1 &
|
| 15 |
+
skip evaluated model: JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/realqa
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
skip evaluated model: JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/eval
|
| 19 |
+
skip evaluated model: JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/realqa
|
| 20 |
+
All checkpoints exist. Wait for runing...
|
| 21 |
+
available_gpus: [0, 1, 2, 4, 5, 7]
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/eval/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/realqa/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/eval/inf.log 2>&1 &
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: [2, 4]
|
| 74 |
+
The following command is about to run:
|
| 75 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/realqa/inf.log 2>&1 &
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: [0, 1]
|
| 79 |
+
The following command is about to run:
|
| 80 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/eval/inf.log 2>&1 &
|
| 81 |
+
Wish me good luck!
|
deep_search/sft/4-1_no_error_data_871_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [78, 68, 55, 41]
|
| 2 |
+
step_list: [78, 68, 55, 41]
|
| 3 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-78) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/78/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/78/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/78/realqa/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/68/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/68/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/68/realqa/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/realqa/inf.log 2>&1 &
|
| 15 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41) to exist...
|
| 16 |
+
The checkpoint exists. Waiting for running...
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/41/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/41/eval/inf.log 2>&1 &
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/41/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/41/realqa/inf.log 2>&1 &
|
| 19 |
+
All checkpoints exist. Wait for runing...
|
| 20 |
+
available_gpus: [0, 1, 2, 4, 5, 7]
|
| 21 |
+
The following command is about to run:
|
| 22 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/78/eval/inf.log 2>&1 &
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/78/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/78/realqa/inf.log 2>&1 &
|
| 25 |
+
The following command is about to run:
|
| 26 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/68/eval/inf.log 2>&1 &
|
| 27 |
+
available_gpus: [0, 1, 2, 4, 5, 7]
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/68/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/68/realqa/inf.log 2>&1 &
|
| 30 |
+
The following command is about to run:
|
| 31 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/eval/inf.log 2>&1 &
|
| 32 |
+
The following command is about to run:
|
| 33 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/realqa/inf.log 2>&1 &
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: [5, 7]
|
| 69 |
+
The following command is about to run:
|
| 70 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/41/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/41/eval/inf.log 2>&1 &
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: [1]
|
| 75 |
+
available_gpus: [0, 1]
|
| 76 |
+
The following command is about to run:
|
| 77 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/41/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/41/realqa/inf.log 2>&1 &
|
| 78 |
+
Wish me good luck!
|
deep_search/sft/4-24__doc_by_itself__add_math871.log
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Traceback (most recent call last):
|
| 2 |
+
File "/opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py", line 176, in <module>
|
| 3 |
+
ckpt = sys.argv[1]
|
| 4 |
+
IndexError: list index out of range
|
deep_search/sft/4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_qwen7b_sft_871_checkpoint-78_add_math871_after_search.log
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41]
|
| 3 |
+
step_list: [78, 68, 55, 41]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/eval/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime/inf.log 2>&1 &
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/eval/inf.log 2>&1 &
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime/inf.log 2>&1 &
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/eval/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/eval/inf.log 2>&1 &
|
| 19 |
+
skip evaluated model: JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/aime
|
| 20 |
+
All checkpoints exist. Wait for runing...
|
| 21 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/eval/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/eval/inf.log 2>&1 &
|
| 28 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 29 |
+
The following command is about to run:
|
| 30 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime/inf.log 2>&1 &
|
| 31 |
+
The following command is about to run:
|
| 32 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/eval/inf.log 2>&1 &
|
| 33 |
+
The following command is about to run:
|
| 34 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime/inf.log 2>&1 &
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: [4, 5]
|
| 42 |
+
The following command is about to run:
|
| 43 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/eval/inf.log 2>&1 &
|
| 44 |
+
Wish me good luck!
|
deep_search/sft/4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_qwen7b_sft_871_checkpoint-78_add_math871_after_search_1.log
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41]
|
| 3 |
+
step_list: [78, 68, 55, 41]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/gaia/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/gaia/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/gaia/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/gaia/inf.log 2>&1 &
|
| 16 |
+
All checkpoints exist. Wait for running...
|
| 17 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/gaia/inf.log 2>&1 &
|
| 20 |
+
The following command is about to run:
|
| 21 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/gaia/inf.log 2>&1 &
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/gaia/inf.log 2>&1 &
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: [4, 5]
|
| 34 |
+
The following command is about to run:
|
| 35 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/gaia/inf.log 2>&1 &
|
| 36 |
+
Wish me good luck!
|
deep_search/sft/4-24_no_error_data_871_doc_by_itself_DeepSeek-R1-Distill-Qwen-7B.log
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41]
|
| 3 |
+
step_list: [78, 68, 55, 41]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/78/eval/inf.log 2>&1 &
|
| 7 |
+
skip evaluated model: JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/78/gaia
|
| 8 |
+
skip evaluated model: JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/78/aime
|
| 9 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68) to exist...
|
| 10 |
+
The checkpoint exists. Waiting for running...
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/eval/inf.log 2>&1 &
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/gaia/inf.log 2>&1 &
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/aime/inf.log 2>&1 &
|
| 14 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55) to exist...
|
| 15 |
+
The checkpoint exists. Waiting for running...
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/eval/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/gaia/inf.log 2>&1 &
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/aime/inf.log 2>&1 &
|
| 19 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41) to exist...
|
| 20 |
+
The checkpoint exists. Waiting for running...
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/eval/inf.log 2>&1 &
|
| 22 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/gaia/inf.log 2>&1 &
|
| 23 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/aime/inf.log 2>&1 &
|
| 24 |
+
All checkpoints exist. Wait for runing...
|
| 25 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/78/eval/inf.log 2>&1 &
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/eval/inf.log 2>&1 &
|
| 30 |
+
The following command is about to run:
|
| 31 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/gaia/inf.log 2>&1 &
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: [6, 7]
|
| 61 |
+
The following command is about to run:
|
| 62 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/68/aime/inf.log 2>&1 &
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: [6, 7]
|
| 72 |
+
The following command is about to run:
|
| 73 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/eval/inf.log 2>&1 &
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: []
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
| 115 |
+
available_gpus: []
|
| 116 |
+
available_gpus: []
|
| 117 |
+
available_gpus: []
|
| 118 |
+
available_gpus: []
|
| 119 |
+
available_gpus: []
|
| 120 |
+
available_gpus: []
|
| 121 |
+
available_gpus: []
|
| 122 |
+
available_gpus: []
|
| 123 |
+
available_gpus: []
|
| 124 |
+
available_gpus: []
|
| 125 |
+
available_gpus: []
|
| 126 |
+
available_gpus: []
|
| 127 |
+
available_gpus: []
|
| 128 |
+
available_gpus: []
|
| 129 |
+
available_gpus: []
|
| 130 |
+
available_gpus: []
|
| 131 |
+
available_gpus: []
|
| 132 |
+
available_gpus: []
|
| 133 |
+
available_gpus: []
|
| 134 |
+
available_gpus: []
|
| 135 |
+
available_gpus: []
|
| 136 |
+
available_gpus: []
|
| 137 |
+
available_gpus: []
|
| 138 |
+
available_gpus: []
|
| 139 |
+
available_gpus: []
|
| 140 |
+
available_gpus: []
|
| 141 |
+
available_gpus: []
|
| 142 |
+
available_gpus: []
|
| 143 |
+
available_gpus: []
|
| 144 |
+
available_gpus: []
|
| 145 |
+
available_gpus: []
|
| 146 |
+
available_gpus: []
|
| 147 |
+
available_gpus: []
|
| 148 |
+
available_gpus: [4, 5]
|
| 149 |
+
The following command is about to run:
|
| 150 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/gaia/inf.log 2>&1 &
|
| 151 |
+
available_gpus: [4, 5]
|
| 152 |
+
The following command is about to run:
|
| 153 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/55/aime/inf.log 2>&1 &
|
| 154 |
+
available_gpus: []
|
| 155 |
+
available_gpus: []
|
| 156 |
+
available_gpus: []
|
| 157 |
+
available_gpus: []
|
| 158 |
+
available_gpus: []
|
| 159 |
+
available_gpus: [2, 3]
|
| 160 |
+
The following command is about to run:
|
| 161 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/eval/inf.log 2>&1 &
|
| 162 |
+
available_gpus: []
|
| 163 |
+
available_gpus: []
|
| 164 |
+
available_gpus: []
|
| 165 |
+
available_gpus: []
|
| 166 |
+
available_gpus: []
|
| 167 |
+
available_gpus: []
|
| 168 |
+
available_gpus: []
|
| 169 |
+
available_gpus: []
|
| 170 |
+
available_gpus: []
|
| 171 |
+
available_gpus: []
|
| 172 |
+
available_gpus: []
|
| 173 |
+
available_gpus: []
|
| 174 |
+
available_gpus: []
|
| 175 |
+
available_gpus: []
|
| 176 |
+
available_gpus: []
|
| 177 |
+
available_gpus: []
|
| 178 |
+
available_gpus: []
|
| 179 |
+
available_gpus: []
|
| 180 |
+
available_gpus: []
|
| 181 |
+
available_gpus: []
|
| 182 |
+
available_gpus: []
|
| 183 |
+
available_gpus: []
|
| 184 |
+
available_gpus: []
|
| 185 |
+
available_gpus: []
|
| 186 |
+
available_gpus: []
|
| 187 |
+
available_gpus: []
|
| 188 |
+
available_gpus: []
|
| 189 |
+
available_gpus: []
|
| 190 |
+
available_gpus: []
|
| 191 |
+
available_gpus: []
|
| 192 |
+
available_gpus: [6, 7]
|
| 193 |
+
The following command is about to run:
|
| 194 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/gaia/inf.log 2>&1 &
|
| 195 |
+
available_gpus: [4, 5]
|
| 196 |
+
The following command is about to run:
|
| 197 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:28315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-7B#TOKEN:DeepSeek-R1-Distill-Qwen-7B#BSZ:2#ACC:4_no_error_data_871/41/aime/inf.log 2>&1 &
|
| 198 |
+
Wish me good luck!
|
deep_search/sft/4-24_no_error_data_871_doc_by_itself_Qwen2.5-7B-Instruct_add_math871_1.log
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [162, 136, 109, 81]
|
| 3 |
+
step_list: [162, 136, 109, 81]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-162) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-162 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/gaia/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-136) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-136 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/gaia/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-109) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-109 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/gaia/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-81) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/gaia/inf.log 2>&1 &
|
| 16 |
+
All checkpoints exist. Wait for runing...
|
| 17 |
+
available_gpus: [2, 3, 6, 7]
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-162 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/gaia/inf.log 2>&1 &
|
| 20 |
+
The following command is about to run:
|
| 21 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-136 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/gaia/inf.log 2>&1 &
|
| 22 |
+
available_gpus: [2, 3, 6, 7]
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-109 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/gaia/inf.log 2>&1 &
|
| 25 |
+
The following command is about to run:
|
| 26 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/gaia/inf.log 2>&1 &
|
| 27 |
+
Wish me good luck!
|
deep_search/sft/4-24_no_error_data_871_doc_by_itself_Qwen2.5-7B-Instruct_add_math871_2.log
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41, 27, 13]
|
| 3 |
+
step_list: [78, 68, 55, 41, 27, 13]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/eval/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/gaia/inf.log 2>&1 &
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/eval/inf.log 2>&1 &
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/gaia/inf.log 2>&1 &
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/eval/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/gaia/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/eval/inf.log 2>&1 &
|
| 19 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/gaia/inf.log 2>&1 &
|
| 20 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-27) to exist...
|
| 21 |
+
The checkpoint exists. Waiting for running...
|
| 22 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/27/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/27/eval/inf.log 2>&1 &
|
| 23 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/27/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/27/gaia/inf.log 2>&1 &
|
| 24 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-13) to exist...
|
| 25 |
+
The checkpoint exists. Waiting for running...
|
| 26 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/13/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/13/eval/inf.log 2>&1 &
|
| 27 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/13/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/13/gaia/inf.log 2>&1 &
|
| 28 |
+
All checkpoints exist. Wait for runing...
|
| 29 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 30 |
+
The following command is about to run:
|
| 31 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/eval/inf.log 2>&1 &
|
| 32 |
+
The following command is about to run:
|
| 33 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/gaia/inf.log 2>&1 &
|
| 34 |
+
The following command is about to run:
|
| 35 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/eval/inf.log 2>&1 &
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: [4, 5]
|
| 45 |
+
The following command is about to run:
|
| 46 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/gaia/inf.log 2>&1 &
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: [4, 5]
|
| 56 |
+
The following command is about to run:
|
| 57 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/eval/inf.log 2>&1 &
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: []
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: []
|
deep_search/sft/4-25_no_error_data_871_doc_by_itself_QwQ-32B_1.log
ADDED
|
@@ -0,0 +1,746 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 54, 40, 27, 13]
|
| 3 |
+
step_list: [40, 27, 13]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/eval/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/gaia/inf.log 2>&1 &
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/aime/inf.log 2>&1 &
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/frames/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/eval/inf.log 2>&1 &
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/gaia/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/aime/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/frames/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/eval/inf.log 2>&1 &
|
| 19 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/gaia/inf.log 2>&1 &
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/aime/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/frames/inf.log 2>&1 &
|
| 22 |
+
All checkpoints exist. Wait for runing...
|
| 23 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/eval/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/gaia/inf.log 2>&1 &
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/aime/inf.log 2>&1 &
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: [6, 7]
|
| 43 |
+
The following command is about to run:
|
| 44 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-40 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/40/frames/inf.log 2>&1 &
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: []
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
| 115 |
+
available_gpus: []
|
| 116 |
+
available_gpus: []
|
| 117 |
+
available_gpus: []
|
| 118 |
+
available_gpus: []
|
| 119 |
+
available_gpus: []
|
| 120 |
+
available_gpus: []
|
| 121 |
+
available_gpus: []
|
| 122 |
+
available_gpus: []
|
| 123 |
+
available_gpus: []
|
| 124 |
+
available_gpus: []
|
| 125 |
+
available_gpus: []
|
| 126 |
+
available_gpus: []
|
| 127 |
+
available_gpus: []
|
| 128 |
+
available_gpus: []
|
| 129 |
+
available_gpus: []
|
| 130 |
+
available_gpus: []
|
| 131 |
+
available_gpus: []
|
| 132 |
+
available_gpus: []
|
| 133 |
+
available_gpus: []
|
| 134 |
+
available_gpus: []
|
| 135 |
+
available_gpus: []
|
| 136 |
+
available_gpus: []
|
| 137 |
+
available_gpus: []
|
| 138 |
+
available_gpus: []
|
| 139 |
+
available_gpus: [4, 5]
|
| 140 |
+
The following command is about to run:
|
| 141 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/eval/inf.log 2>&1 &
|
| 142 |
+
available_gpus: []
|
| 143 |
+
available_gpus: []
|
| 144 |
+
available_gpus: []
|
| 145 |
+
available_gpus: []
|
| 146 |
+
available_gpus: []
|
| 147 |
+
available_gpus: []
|
| 148 |
+
available_gpus: []
|
| 149 |
+
available_gpus: []
|
| 150 |
+
available_gpus: []
|
| 151 |
+
available_gpus: []
|
| 152 |
+
available_gpus: []
|
| 153 |
+
available_gpus: []
|
| 154 |
+
available_gpus: []
|
| 155 |
+
available_gpus: []
|
| 156 |
+
available_gpus: []
|
| 157 |
+
available_gpus: []
|
| 158 |
+
available_gpus: []
|
| 159 |
+
available_gpus: []
|
| 160 |
+
available_gpus: []
|
| 161 |
+
available_gpus: []
|
| 162 |
+
available_gpus: []
|
| 163 |
+
available_gpus: []
|
| 164 |
+
available_gpus: []
|
| 165 |
+
available_gpus: []
|
| 166 |
+
available_gpus: []
|
| 167 |
+
available_gpus: []
|
| 168 |
+
available_gpus: []
|
| 169 |
+
available_gpus: []
|
| 170 |
+
available_gpus: []
|
| 171 |
+
available_gpus: []
|
| 172 |
+
available_gpus: []
|
| 173 |
+
available_gpus: []
|
| 174 |
+
available_gpus: []
|
| 175 |
+
available_gpus: []
|
| 176 |
+
available_gpus: []
|
| 177 |
+
available_gpus: []
|
| 178 |
+
available_gpus: []
|
| 179 |
+
available_gpus: []
|
| 180 |
+
available_gpus: []
|
| 181 |
+
available_gpus: []
|
| 182 |
+
available_gpus: []
|
| 183 |
+
available_gpus: []
|
| 184 |
+
available_gpus: []
|
| 185 |
+
available_gpus: []
|
| 186 |
+
available_gpus: []
|
| 187 |
+
available_gpus: []
|
| 188 |
+
available_gpus: []
|
| 189 |
+
available_gpus: []
|
| 190 |
+
available_gpus: []
|
| 191 |
+
available_gpus: []
|
| 192 |
+
available_gpus: []
|
| 193 |
+
available_gpus: []
|
| 194 |
+
available_gpus: []
|
| 195 |
+
available_gpus: []
|
| 196 |
+
available_gpus: []
|
| 197 |
+
available_gpus: []
|
| 198 |
+
available_gpus: []
|
| 199 |
+
available_gpus: []
|
| 200 |
+
available_gpus: []
|
| 201 |
+
available_gpus: []
|
| 202 |
+
available_gpus: []
|
| 203 |
+
available_gpus: []
|
| 204 |
+
available_gpus: []
|
| 205 |
+
available_gpus: []
|
| 206 |
+
available_gpus: []
|
| 207 |
+
available_gpus: []
|
| 208 |
+
available_gpus: []
|
| 209 |
+
available_gpus: []
|
| 210 |
+
available_gpus: []
|
| 211 |
+
available_gpus: []
|
| 212 |
+
available_gpus: []
|
| 213 |
+
available_gpus: []
|
| 214 |
+
available_gpus: []
|
| 215 |
+
available_gpus: []
|
| 216 |
+
available_gpus: []
|
| 217 |
+
available_gpus: []
|
| 218 |
+
available_gpus: []
|
| 219 |
+
available_gpus: []
|
| 220 |
+
available_gpus: []
|
| 221 |
+
available_gpus: []
|
| 222 |
+
available_gpus: []
|
| 223 |
+
available_gpus: []
|
| 224 |
+
available_gpus: []
|
| 225 |
+
available_gpus: []
|
| 226 |
+
available_gpus: []
|
| 227 |
+
available_gpus: []
|
| 228 |
+
available_gpus: []
|
| 229 |
+
available_gpus: []
|
| 230 |
+
available_gpus: []
|
| 231 |
+
available_gpus: []
|
| 232 |
+
available_gpus: []
|
| 233 |
+
available_gpus: []
|
| 234 |
+
available_gpus: []
|
| 235 |
+
available_gpus: []
|
| 236 |
+
available_gpus: []
|
| 237 |
+
available_gpus: []
|
| 238 |
+
available_gpus: []
|
| 239 |
+
available_gpus: []
|
| 240 |
+
available_gpus: []
|
| 241 |
+
available_gpus: []
|
| 242 |
+
available_gpus: []
|
| 243 |
+
available_gpus: []
|
| 244 |
+
available_gpus: []
|
| 245 |
+
available_gpus: []
|
| 246 |
+
available_gpus: []
|
| 247 |
+
available_gpus: []
|
| 248 |
+
available_gpus: []
|
| 249 |
+
available_gpus: []
|
| 250 |
+
available_gpus: []
|
| 251 |
+
available_gpus: []
|
| 252 |
+
available_gpus: []
|
| 253 |
+
available_gpus: []
|
| 254 |
+
available_gpus: []
|
| 255 |
+
available_gpus: []
|
| 256 |
+
available_gpus: []
|
| 257 |
+
available_gpus: []
|
| 258 |
+
available_gpus: []
|
| 259 |
+
available_gpus: []
|
| 260 |
+
available_gpus: []
|
| 261 |
+
available_gpus: []
|
| 262 |
+
available_gpus: []
|
| 263 |
+
available_gpus: []
|
| 264 |
+
available_gpus: []
|
| 265 |
+
available_gpus: []
|
| 266 |
+
available_gpus: []
|
| 267 |
+
available_gpus: []
|
| 268 |
+
available_gpus: []
|
| 269 |
+
available_gpus: []
|
| 270 |
+
available_gpus: []
|
| 271 |
+
available_gpus: []
|
| 272 |
+
available_gpus: []
|
| 273 |
+
available_gpus: []
|
| 274 |
+
available_gpus: []
|
| 275 |
+
available_gpus: []
|
| 276 |
+
available_gpus: []
|
| 277 |
+
available_gpus: []
|
| 278 |
+
available_gpus: []
|
| 279 |
+
available_gpus: []
|
| 280 |
+
available_gpus: []
|
| 281 |
+
available_gpus: []
|
| 282 |
+
available_gpus: []
|
| 283 |
+
available_gpus: []
|
| 284 |
+
available_gpus: []
|
| 285 |
+
available_gpus: []
|
| 286 |
+
available_gpus: []
|
| 287 |
+
available_gpus: []
|
| 288 |
+
available_gpus: []
|
| 289 |
+
available_gpus: []
|
| 290 |
+
available_gpus: []
|
| 291 |
+
available_gpus: []
|
| 292 |
+
available_gpus: []
|
| 293 |
+
available_gpus: []
|
| 294 |
+
available_gpus: []
|
| 295 |
+
available_gpus: []
|
| 296 |
+
available_gpus: []
|
| 297 |
+
available_gpus: []
|
| 298 |
+
available_gpus: []
|
| 299 |
+
available_gpus: []
|
| 300 |
+
available_gpus: []
|
| 301 |
+
available_gpus: []
|
| 302 |
+
available_gpus: []
|
| 303 |
+
available_gpus: []
|
| 304 |
+
available_gpus: []
|
| 305 |
+
available_gpus: []
|
| 306 |
+
available_gpus: []
|
| 307 |
+
available_gpus: []
|
| 308 |
+
available_gpus: []
|
| 309 |
+
available_gpus: []
|
| 310 |
+
available_gpus: []
|
| 311 |
+
available_gpus: []
|
| 312 |
+
available_gpus: []
|
| 313 |
+
available_gpus: []
|
| 314 |
+
available_gpus: []
|
| 315 |
+
available_gpus: []
|
| 316 |
+
available_gpus: []
|
| 317 |
+
available_gpus: []
|
| 318 |
+
available_gpus: []
|
| 319 |
+
available_gpus: []
|
| 320 |
+
available_gpus: []
|
| 321 |
+
available_gpus: []
|
| 322 |
+
available_gpus: []
|
| 323 |
+
available_gpus: []
|
| 324 |
+
available_gpus: []
|
| 325 |
+
available_gpus: []
|
| 326 |
+
available_gpus: []
|
| 327 |
+
available_gpus: []
|
| 328 |
+
available_gpus: []
|
| 329 |
+
available_gpus: []
|
| 330 |
+
available_gpus: []
|
| 331 |
+
available_gpus: []
|
| 332 |
+
available_gpus: []
|
| 333 |
+
available_gpus: []
|
| 334 |
+
available_gpus: []
|
| 335 |
+
available_gpus: []
|
| 336 |
+
available_gpus: []
|
| 337 |
+
available_gpus: []
|
| 338 |
+
available_gpus: []
|
| 339 |
+
available_gpus: []
|
| 340 |
+
available_gpus: []
|
| 341 |
+
available_gpus: []
|
| 342 |
+
available_gpus: []
|
| 343 |
+
available_gpus: []
|
| 344 |
+
available_gpus: []
|
| 345 |
+
available_gpus: []
|
| 346 |
+
available_gpus: []
|
| 347 |
+
available_gpus: []
|
| 348 |
+
available_gpus: []
|
| 349 |
+
available_gpus: []
|
| 350 |
+
available_gpus: []
|
| 351 |
+
available_gpus: []
|
| 352 |
+
available_gpus: []
|
| 353 |
+
available_gpus: []
|
| 354 |
+
available_gpus: []
|
| 355 |
+
available_gpus: []
|
| 356 |
+
available_gpus: []
|
| 357 |
+
available_gpus: []
|
| 358 |
+
available_gpus: []
|
| 359 |
+
available_gpus: []
|
| 360 |
+
available_gpus: []
|
| 361 |
+
available_gpus: []
|
| 362 |
+
available_gpus: []
|
| 363 |
+
available_gpus: []
|
| 364 |
+
available_gpus: []
|
| 365 |
+
available_gpus: []
|
| 366 |
+
available_gpus: []
|
| 367 |
+
available_gpus: []
|
| 368 |
+
available_gpus: []
|
| 369 |
+
available_gpus: []
|
| 370 |
+
available_gpus: []
|
| 371 |
+
available_gpus: []
|
| 372 |
+
available_gpus: []
|
| 373 |
+
available_gpus: []
|
| 374 |
+
available_gpus: []
|
| 375 |
+
available_gpus: []
|
| 376 |
+
available_gpus: []
|
| 377 |
+
available_gpus: []
|
| 378 |
+
available_gpus: []
|
| 379 |
+
available_gpus: []
|
| 380 |
+
available_gpus: []
|
| 381 |
+
available_gpus: []
|
| 382 |
+
available_gpus: []
|
| 383 |
+
available_gpus: []
|
| 384 |
+
available_gpus: []
|
| 385 |
+
available_gpus: []
|
| 386 |
+
available_gpus: []
|
| 387 |
+
available_gpus: []
|
| 388 |
+
available_gpus: []
|
| 389 |
+
available_gpus: []
|
| 390 |
+
available_gpus: []
|
| 391 |
+
available_gpus: []
|
| 392 |
+
available_gpus: []
|
| 393 |
+
available_gpus: []
|
| 394 |
+
available_gpus: []
|
| 395 |
+
available_gpus: []
|
| 396 |
+
available_gpus: []
|
| 397 |
+
available_gpus: []
|
| 398 |
+
available_gpus: []
|
| 399 |
+
available_gpus: []
|
| 400 |
+
available_gpus: []
|
| 401 |
+
available_gpus: []
|
| 402 |
+
available_gpus: []
|
| 403 |
+
available_gpus: []
|
| 404 |
+
available_gpus: []
|
| 405 |
+
available_gpus: []
|
| 406 |
+
available_gpus: []
|
| 407 |
+
available_gpus: []
|
| 408 |
+
available_gpus: []
|
| 409 |
+
available_gpus: []
|
| 410 |
+
available_gpus: []
|
| 411 |
+
available_gpus: []
|
| 412 |
+
available_gpus: []
|
| 413 |
+
available_gpus: []
|
| 414 |
+
available_gpus: []
|
| 415 |
+
available_gpus: []
|
| 416 |
+
available_gpus: []
|
| 417 |
+
available_gpus: []
|
| 418 |
+
available_gpus: []
|
| 419 |
+
available_gpus: []
|
| 420 |
+
available_gpus: []
|
| 421 |
+
available_gpus: []
|
| 422 |
+
available_gpus: []
|
| 423 |
+
available_gpus: []
|
| 424 |
+
available_gpus: []
|
| 425 |
+
available_gpus: []
|
| 426 |
+
available_gpus: []
|
| 427 |
+
available_gpus: []
|
| 428 |
+
available_gpus: []
|
| 429 |
+
available_gpus: []
|
| 430 |
+
available_gpus: []
|
| 431 |
+
available_gpus: []
|
| 432 |
+
available_gpus: []
|
| 433 |
+
available_gpus: []
|
| 434 |
+
available_gpus: []
|
| 435 |
+
available_gpus: []
|
| 436 |
+
available_gpus: []
|
| 437 |
+
available_gpus: []
|
| 438 |
+
available_gpus: []
|
| 439 |
+
available_gpus: []
|
| 440 |
+
available_gpus: []
|
| 441 |
+
available_gpus: []
|
| 442 |
+
available_gpus: []
|
| 443 |
+
available_gpus: []
|
| 444 |
+
available_gpus: []
|
| 445 |
+
available_gpus: []
|
| 446 |
+
available_gpus: []
|
| 447 |
+
available_gpus: []
|
| 448 |
+
available_gpus: []
|
| 449 |
+
available_gpus: []
|
| 450 |
+
available_gpus: []
|
| 451 |
+
available_gpus: []
|
| 452 |
+
available_gpus: []
|
| 453 |
+
available_gpus: []
|
| 454 |
+
available_gpus: []
|
| 455 |
+
available_gpus: []
|
| 456 |
+
available_gpus: []
|
| 457 |
+
available_gpus: []
|
| 458 |
+
available_gpus: []
|
| 459 |
+
available_gpus: []
|
| 460 |
+
available_gpus: []
|
| 461 |
+
available_gpus: []
|
| 462 |
+
available_gpus: []
|
| 463 |
+
available_gpus: []
|
| 464 |
+
available_gpus: []
|
| 465 |
+
available_gpus: []
|
| 466 |
+
available_gpus: []
|
| 467 |
+
available_gpus: []
|
| 468 |
+
available_gpus: []
|
| 469 |
+
available_gpus: []
|
| 470 |
+
available_gpus: []
|
| 471 |
+
available_gpus: []
|
| 472 |
+
available_gpus: []
|
| 473 |
+
available_gpus: []
|
| 474 |
+
available_gpus: []
|
| 475 |
+
available_gpus: []
|
| 476 |
+
available_gpus: []
|
| 477 |
+
available_gpus: []
|
| 478 |
+
available_gpus: []
|
| 479 |
+
available_gpus: []
|
| 480 |
+
available_gpus: []
|
| 481 |
+
available_gpus: [2, 3]
|
| 482 |
+
The following command is about to run:
|
| 483 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/gaia/inf.log 2>&1 &
|
| 484 |
+
available_gpus: []
|
| 485 |
+
available_gpus: []
|
| 486 |
+
available_gpus: []
|
| 487 |
+
available_gpus: []
|
| 488 |
+
available_gpus: []
|
| 489 |
+
available_gpus: []
|
| 490 |
+
available_gpus: []
|
| 491 |
+
available_gpus: []
|
| 492 |
+
available_gpus: []
|
| 493 |
+
available_gpus: []
|
| 494 |
+
available_gpus: []
|
| 495 |
+
available_gpus: []
|
| 496 |
+
available_gpus: []
|
| 497 |
+
available_gpus: []
|
| 498 |
+
available_gpus: []
|
| 499 |
+
available_gpus: []
|
| 500 |
+
available_gpus: []
|
| 501 |
+
available_gpus: []
|
| 502 |
+
available_gpus: []
|
| 503 |
+
available_gpus: []
|
| 504 |
+
available_gpus: []
|
| 505 |
+
available_gpus: []
|
| 506 |
+
available_gpus: []
|
| 507 |
+
available_gpus: []
|
| 508 |
+
available_gpus: []
|
| 509 |
+
available_gpus: []
|
| 510 |
+
available_gpus: []
|
| 511 |
+
available_gpus: []
|
| 512 |
+
available_gpus: []
|
| 513 |
+
available_gpus: []
|
| 514 |
+
available_gpus: []
|
| 515 |
+
available_gpus: []
|
| 516 |
+
available_gpus: []
|
| 517 |
+
available_gpus: []
|
| 518 |
+
available_gpus: []
|
| 519 |
+
available_gpus: []
|
| 520 |
+
available_gpus: []
|
| 521 |
+
available_gpus: []
|
| 522 |
+
available_gpus: []
|
| 523 |
+
available_gpus: []
|
| 524 |
+
available_gpus: []
|
| 525 |
+
available_gpus: []
|
| 526 |
+
available_gpus: []
|
| 527 |
+
available_gpus: []
|
| 528 |
+
available_gpus: []
|
| 529 |
+
available_gpus: []
|
| 530 |
+
available_gpus: []
|
| 531 |
+
available_gpus: []
|
| 532 |
+
available_gpus: []
|
| 533 |
+
available_gpus: []
|
| 534 |
+
available_gpus: []
|
| 535 |
+
available_gpus: []
|
| 536 |
+
available_gpus: []
|
| 537 |
+
available_gpus: []
|
| 538 |
+
available_gpus: []
|
| 539 |
+
available_gpus: []
|
| 540 |
+
available_gpus: []
|
| 541 |
+
available_gpus: []
|
| 542 |
+
available_gpus: []
|
| 543 |
+
available_gpus: []
|
| 544 |
+
available_gpus: []
|
| 545 |
+
available_gpus: []
|
| 546 |
+
available_gpus: []
|
| 547 |
+
available_gpus: [2, 3]
|
| 548 |
+
The following command is about to run:
|
| 549 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/aime/inf.log 2>&1 &
|
| 550 |
+
available_gpus: []
|
| 551 |
+
available_gpus: []
|
| 552 |
+
available_gpus: []
|
| 553 |
+
available_gpus: []
|
| 554 |
+
available_gpus: []
|
| 555 |
+
available_gpus: []
|
| 556 |
+
available_gpus: []
|
| 557 |
+
available_gpus: []
|
| 558 |
+
available_gpus: [2, 3]
|
| 559 |
+
The following command is about to run:
|
| 560 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/27/frames/inf.log 2>&1 &
|
| 561 |
+
available_gpus: []
|
| 562 |
+
available_gpus: []
|
| 563 |
+
available_gpus: []
|
| 564 |
+
available_gpus: []
|
| 565 |
+
available_gpus: []
|
| 566 |
+
available_gpus: []
|
| 567 |
+
available_gpus: []
|
| 568 |
+
available_gpus: []
|
| 569 |
+
available_gpus: []
|
| 570 |
+
available_gpus: []
|
| 571 |
+
available_gpus: []
|
| 572 |
+
available_gpus: []
|
| 573 |
+
available_gpus: []
|
| 574 |
+
available_gpus: []
|
| 575 |
+
available_gpus: []
|
| 576 |
+
available_gpus: []
|
| 577 |
+
available_gpus: []
|
| 578 |
+
available_gpus: []
|
| 579 |
+
available_gpus: []
|
| 580 |
+
available_gpus: []
|
| 581 |
+
available_gpus: []
|
| 582 |
+
available_gpus: []
|
| 583 |
+
available_gpus: []
|
| 584 |
+
available_gpus: []
|
| 585 |
+
available_gpus: []
|
| 586 |
+
available_gpus: []
|
| 587 |
+
available_gpus: []
|
| 588 |
+
available_gpus: []
|
| 589 |
+
available_gpus: []
|
| 590 |
+
available_gpus: []
|
| 591 |
+
available_gpus: []
|
| 592 |
+
available_gpus: []
|
| 593 |
+
available_gpus: []
|
| 594 |
+
available_gpus: []
|
| 595 |
+
available_gpus: []
|
| 596 |
+
available_gpus: []
|
| 597 |
+
available_gpus: []
|
| 598 |
+
available_gpus: []
|
| 599 |
+
available_gpus: []
|
| 600 |
+
available_gpus: []
|
| 601 |
+
available_gpus: []
|
| 602 |
+
available_gpus: []
|
| 603 |
+
available_gpus: []
|
| 604 |
+
available_gpus: []
|
| 605 |
+
available_gpus: []
|
| 606 |
+
available_gpus: []
|
| 607 |
+
available_gpus: []
|
| 608 |
+
available_gpus: []
|
| 609 |
+
available_gpus: []
|
| 610 |
+
available_gpus: []
|
| 611 |
+
available_gpus: []
|
| 612 |
+
available_gpus: []
|
| 613 |
+
available_gpus: []
|
| 614 |
+
available_gpus: []
|
| 615 |
+
available_gpus: []
|
| 616 |
+
available_gpus: []
|
| 617 |
+
available_gpus: []
|
| 618 |
+
available_gpus: []
|
| 619 |
+
available_gpus: [4, 5]
|
| 620 |
+
The following command is about to run:
|
| 621 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/eval/inf.log 2>&1 &
|
| 622 |
+
available_gpus: []
|
| 623 |
+
available_gpus: [6, 7]
|
| 624 |
+
The following command is about to run:
|
| 625 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/gaia/inf.log 2>&1 &
|
| 626 |
+
available_gpus: []
|
| 627 |
+
available_gpus: []
|
| 628 |
+
available_gpus: []
|
| 629 |
+
available_gpus: []
|
| 630 |
+
available_gpus: []
|
| 631 |
+
available_gpus: []
|
| 632 |
+
available_gpus: []
|
| 633 |
+
available_gpus: []
|
| 634 |
+
available_gpus: []
|
| 635 |
+
available_gpus: []
|
| 636 |
+
available_gpus: []
|
| 637 |
+
available_gpus: []
|
| 638 |
+
available_gpus: []
|
| 639 |
+
available_gpus: []
|
| 640 |
+
available_gpus: []
|
| 641 |
+
available_gpus: []
|
| 642 |
+
available_gpus: []
|
| 643 |
+
available_gpus: []
|
| 644 |
+
available_gpus: []
|
| 645 |
+
available_gpus: []
|
| 646 |
+
available_gpus: []
|
| 647 |
+
available_gpus: []
|
| 648 |
+
available_gpus: []
|
| 649 |
+
available_gpus: []
|
| 650 |
+
available_gpus: []
|
| 651 |
+
available_gpus: []
|
| 652 |
+
available_gpus: []
|
| 653 |
+
available_gpus: []
|
| 654 |
+
available_gpus: []
|
| 655 |
+
available_gpus: []
|
| 656 |
+
available_gpus: []
|
| 657 |
+
available_gpus: []
|
| 658 |
+
available_gpus: []
|
| 659 |
+
available_gpus: []
|
| 660 |
+
available_gpus: []
|
| 661 |
+
available_gpus: []
|
| 662 |
+
available_gpus: []
|
| 663 |
+
available_gpus: []
|
| 664 |
+
available_gpus: []
|
| 665 |
+
available_gpus: []
|
| 666 |
+
available_gpus: []
|
| 667 |
+
available_gpus: []
|
| 668 |
+
available_gpus: []
|
| 669 |
+
available_gpus: []
|
| 670 |
+
available_gpus: []
|
| 671 |
+
available_gpus: []
|
| 672 |
+
available_gpus: []
|
| 673 |
+
available_gpus: []
|
| 674 |
+
available_gpus: []
|
| 675 |
+
available_gpus: []
|
| 676 |
+
available_gpus: []
|
| 677 |
+
available_gpus: []
|
| 678 |
+
available_gpus: []
|
| 679 |
+
available_gpus: []
|
| 680 |
+
available_gpus: []
|
| 681 |
+
available_gpus: []
|
| 682 |
+
available_gpus: []
|
| 683 |
+
available_gpus: []
|
| 684 |
+
available_gpus: []
|
| 685 |
+
available_gpus: []
|
| 686 |
+
available_gpus: []
|
| 687 |
+
available_gpus: []
|
| 688 |
+
available_gpus: []
|
| 689 |
+
available_gpus: []
|
| 690 |
+
available_gpus: []
|
| 691 |
+
available_gpus: []
|
| 692 |
+
available_gpus: []
|
| 693 |
+
available_gpus: []
|
| 694 |
+
available_gpus: []
|
| 695 |
+
available_gpus: []
|
| 696 |
+
available_gpus: []
|
| 697 |
+
available_gpus: []
|
| 698 |
+
available_gpus: []
|
| 699 |
+
available_gpus: []
|
| 700 |
+
available_gpus: []
|
| 701 |
+
available_gpus: []
|
| 702 |
+
available_gpus: []
|
| 703 |
+
available_gpus: []
|
| 704 |
+
available_gpus: []
|
| 705 |
+
available_gpus: []
|
| 706 |
+
available_gpus: []
|
| 707 |
+
available_gpus: []
|
| 708 |
+
available_gpus: []
|
| 709 |
+
available_gpus: []
|
| 710 |
+
available_gpus: []
|
| 711 |
+
available_gpus: []
|
| 712 |
+
available_gpus: []
|
| 713 |
+
available_gpus: []
|
| 714 |
+
available_gpus: []
|
| 715 |
+
available_gpus: []
|
| 716 |
+
available_gpus: []
|
| 717 |
+
available_gpus: []
|
| 718 |
+
available_gpus: []
|
| 719 |
+
available_gpus: []
|
| 720 |
+
available_gpus: []
|
| 721 |
+
available_gpus: []
|
| 722 |
+
available_gpus: []
|
| 723 |
+
available_gpus: []
|
| 724 |
+
available_gpus: []
|
| 725 |
+
available_gpus: []
|
| 726 |
+
available_gpus: []
|
| 727 |
+
available_gpus: []
|
| 728 |
+
available_gpus: []
|
| 729 |
+
available_gpus: []
|
| 730 |
+
available_gpus: []
|
| 731 |
+
available_gpus: [6, 7]
|
| 732 |
+
The following command is about to run:
|
| 733 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/aime/inf.log 2>&1 &
|
| 734 |
+
available_gpus: []
|
| 735 |
+
available_gpus: []
|
| 736 |
+
available_gpus: []
|
| 737 |
+
available_gpus: []
|
| 738 |
+
available_gpus: []
|
| 739 |
+
available_gpus: []
|
| 740 |
+
available_gpus: []
|
| 741 |
+
available_gpus: []
|
| 742 |
+
available_gpus: []
|
| 743 |
+
available_gpus: [6, 7]
|
| 744 |
+
The following command is about to run:
|
| 745 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/13/frames/inf.log 2>&1 &
|
| 746 |
+
Wish me good luck!
|
deep_search/sft/4-4_no_error_data_871_doc_by_itself_DeepSeek-R1-Distill-Qwen-32.log
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41]
|
| 3 |
+
step_list: [78, 68, 55, 41]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/78/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/78/eval_old_500/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-68) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/68/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/68/eval_old_500/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-55) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-41) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/41/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/41/eval_old_500/inf.log 2>&1 &
|
| 16 |
+
All checkpoints exist. Wait for runing...
|
| 17 |
+
available_gpus: [6, 7]
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/78/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/78/eval_old_500/inf.log 2>&1 &
|
| 20 |
+
available_gpus: []
|
| 21 |
+
available_gpus: []
|
| 22 |
+
available_gpus: []
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: [4, 5]
|
| 40 |
+
The following command is about to run:
|
| 41 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/68/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/68/eval_old_500/inf.log 2>&1 &
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: []
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: [2, 3]
|
| 106 |
+
The following command is about to run:
|
| 107 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500/inf.log 2>&1 &
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: [0, 1]
|
| 113 |
+
The following command is about to run:
|
| 114 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/41/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871/41/eval_old_500/inf.log 2>&1 &
|
| 115 |
+
Wish me good luck!
|
deep_search/sft/5-5_ablation_ques_domain_filtered_data_738_Qwen2.5-7B-Instruct.log
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [66, 57, 46, 34, 23]
|
| 2 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-66) to exist...
|
| 3 |
+
The checkpoint exists. Waiting for running...
|
| 4 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/66/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-66 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/66/gaia/inf.log 2>&1 &
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/66/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-66 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/66/bamboogle/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-57) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/57/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-57 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/57/gaia/inf.log 2>&1 &
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/57/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-57 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/57/bamboogle/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-46) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/46/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/46/gaia/inf.log 2>&1 &
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/46/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/46/bamboogle/inf.log 2>&1 &
|
| 14 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-34) to exist...
|
| 15 |
+
The checkpoint exists. Waiting for running...
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/34/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-34 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/34/gaia/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/34/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-34 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/34/bamboogle/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-23) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/23/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/23/gaia/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/23/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/23/bamboogle/inf.log 2>&1 &
|
| 22 |
+
All checkpoints exist. Wait for runing...
|
| 23 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/66/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-66 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/66/gaia/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/66/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-66 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/66/bamboogle/inf.log 2>&1 &
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/57/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-57 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/57/gaia/inf.log 2>&1 &
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: [4, 5]
|
| 56 |
+
The following command is about to run:
|
| 57 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/57/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-57 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/57/bamboogle/inf.log 2>&1 &
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: [4, 5]
|
| 73 |
+
The following command is about to run:
|
| 74 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/46/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/46/gaia/inf.log 2>&1 &
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: [6, 7]
|
| 82 |
+
The following command is about to run:
|
| 83 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/46/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/46/bamboogle/inf.log 2>&1 &
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: [2, 3]
|
| 88 |
+
The following command is about to run:
|
| 89 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/34/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-34 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/34/gaia/inf.log 2>&1 &
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: [6, 7]
|
| 102 |
+
The following command is about to run:
|
| 103 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/34/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-34 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/34/bamboogle/inf.log 2>&1 &
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
| 115 |
+
available_gpus: []
|
| 116 |
+
available_gpus: []
|
| 117 |
+
available_gpus: []
|
| 118 |
+
available_gpus: []
|
| 119 |
+
available_gpus: []
|
| 120 |
+
available_gpus: []
|
| 121 |
+
available_gpus: []
|
| 122 |
+
available_gpus: [6, 7]
|
| 123 |
+
The following command is about to run:
|
| 124 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/23/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/23/gaia/inf.log 2>&1 &
|
| 125 |
+
available_gpus: [4, 5]
|
| 126 |
+
The following command is about to run:
|
| 127 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/23/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:14123#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_domain_filtered_data_738_ablation/23/bamboogle/inf.log 2>&1 &
|
| 128 |
+
Wish me good luck!
|
deep_search/sft/5-5_ablation_ques_keywords_filtered_data_727_Qwen2.5-7B-Instruct.log
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [66, 57, 46, 34, 23]
|
| 2 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-66) to exist...
|
| 3 |
+
The checkpoint exists. Waiting for running...
|
| 4 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/66/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-66 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/66/gaia/inf.log 2>&1 &
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/66/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-66 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/66/bamboogle/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-57) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/57/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-57 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/57/gaia/inf.log 2>&1 &
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/57/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-57 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/57/bamboogle/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-46) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/46/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/46/gaia/inf.log 2>&1 &
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/46/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/46/bamboogle/inf.log 2>&1 &
|
| 14 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-34) to exist...
|
| 15 |
+
The checkpoint exists. Waiting for running...
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/34/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-34 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/34/gaia/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/34/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-34 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/34/bamboogle/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-23) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/23/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/23/gaia/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/23/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/23/bamboogle/inf.log 2>&1 &
|
| 22 |
+
All checkpoints exist. Waiting for running...
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: [6, 7]
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/66/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-66 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/66/gaia/inf.log 2>&1 &
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: [4, 5, 6, 7]
|
| 47 |
+
The following command is about to run:
|
| 48 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/66/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-66 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/66/bamboogle/inf.log 2>&1 &
|
| 49 |
+
The following command is about to run:
|
| 50 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/57/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-57 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/57/gaia/inf.log 2>&1 &
|
| 51 |
+
available_gpus: [4, 5]
|
| 52 |
+
The following command is about to run:
|
| 53 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/57/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-57 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/57/bamboogle/inf.log 2>&1 &
|
| 54 |
+
available_gpus: [5]
|
| 55 |
+
available_gpus: [4, 5]
|
| 56 |
+
The following command is about to run:
|
| 57 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/46/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/46/gaia/inf.log 2>&1 &
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: [2, 3]
|
| 61 |
+
The following command is about to run:
|
| 62 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/46/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-46 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/46/bamboogle/inf.log 2>&1 &
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: [2, 3]
|
| 88 |
+
The following command is about to run:
|
| 89 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/34/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-34 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/34/gaia/inf.log 2>&1 &
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: [6, 7]
|
| 100 |
+
The following command is about to run:
|
| 101 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/34/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-34 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/34/bamboogle/inf.log 2>&1 &
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
| 115 |
+
available_gpus: [4, 5]
|
| 116 |
+
The following command is about to run:
|
| 117 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/23/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/23/gaia/inf.log 2>&1 &
|
| 118 |
+
available_gpus: []
|
| 119 |
+
available_gpus: []
|
| 120 |
+
available_gpus: []
|
| 121 |
+
available_gpus: []
|
| 122 |
+
available_gpus: []
|
| 123 |
+
available_gpus: []
|
| 124 |
+
available_gpus: []
|
| 125 |
+
available_gpus: [6, 7]
|
| 126 |
+
The following command is about to run:
|
| 127 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/23/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/checkpoint-23 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:29477#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_keywords_filtered_data_727_ablation/23/bamboogle/inf.log 2>&1 &
|
| 128 |
+
Wish me good luck!
|
deep_search/sft/5-5_ablation_ques_yiwenci_filtered_data_811_Qwen2.5-7B-Instruct.log
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [72, 63, 51, 38, 25]
|
| 2 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-72) to exist...
|
| 3 |
+
The checkpoint exists. Waiting for running...
|
| 4 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/72/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-72 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/72/gaia/inf.log 2>&1 &
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/72/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-72 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/72/bamboogle/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-63) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/63/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-63 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/63/gaia/inf.log 2>&1 &
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/63/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-63 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/63/bamboogle/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-51) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/51/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/51/gaia/inf.log 2>&1 &
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/51/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/51/bamboogle/inf.log 2>&1 &
|
| 14 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-38) to exist...
|
| 15 |
+
The checkpoint exists. Waiting for running...
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/38/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-38 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/38/gaia/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/38/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-38 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/38/bamboogle/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-25) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/25/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-25 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/25/gaia/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/25/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-25 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/25/bamboogle/inf.log 2>&1 &
|
| 22 |
+
All checkpoints exist. Wait for runing...
|
| 23 |
+
available_gpus: [6, 7]
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/72/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-72 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/72/gaia/inf.log 2>&1 &
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: [4, 5]
|
| 40 |
+
The following command is about to run:
|
| 41 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/72/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-72 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/72/bamboogle/inf.log 2>&1 &
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: [6, 7]
|
| 50 |
+
The following command is about to run:
|
| 51 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/63/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-63 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/63/gaia/inf.log 2>&1 &
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: [6]
|
| 54 |
+
available_gpus: [6]
|
| 55 |
+
available_gpus: [6]
|
| 56 |
+
available_gpus: [6]
|
| 57 |
+
available_gpus: [6]
|
| 58 |
+
available_gpus: [6]
|
| 59 |
+
available_gpus: [6]
|
| 60 |
+
available_gpus: [6]
|
| 61 |
+
available_gpus: [6]
|
| 62 |
+
available_gpus: [6]
|
| 63 |
+
available_gpus: [2, 3, 6]
|
| 64 |
+
The following command is about to run:
|
| 65 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/63/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-63 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/63/bamboogle/inf.log 2>&1 &
|
| 66 |
+
available_gpus: [6]
|
| 67 |
+
available_gpus: [6]
|
| 68 |
+
available_gpus: [6]
|
| 69 |
+
available_gpus: [6]
|
| 70 |
+
available_gpus: [6]
|
| 71 |
+
available_gpus: [6]
|
| 72 |
+
available_gpus: [6]
|
| 73 |
+
available_gpus: [6]
|
| 74 |
+
available_gpus: [6]
|
| 75 |
+
available_gpus: [6]
|
| 76 |
+
available_gpus: [6]
|
| 77 |
+
available_gpus: [6]
|
| 78 |
+
available_gpus: [6]
|
| 79 |
+
available_gpus: [6]
|
| 80 |
+
available_gpus: [6]
|
| 81 |
+
available_gpus: [2, 3, 6]
|
| 82 |
+
The following command is about to run:
|
| 83 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/51/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/51/gaia/inf.log 2>&1 &
|
| 84 |
+
available_gpus: [6]
|
| 85 |
+
available_gpus: [6]
|
| 86 |
+
available_gpus: [6]
|
| 87 |
+
available_gpus: [6]
|
| 88 |
+
available_gpus: [6]
|
| 89 |
+
available_gpus: [6]
|
| 90 |
+
available_gpus: [6]
|
| 91 |
+
available_gpus: [6]
|
| 92 |
+
available_gpus: [6]
|
| 93 |
+
available_gpus: [6]
|
| 94 |
+
available_gpus: [6]
|
| 95 |
+
available_gpus: [6]
|
| 96 |
+
available_gpus: [6]
|
| 97 |
+
available_gpus: [6]
|
| 98 |
+
available_gpus: [6]
|
| 99 |
+
available_gpus: [6]
|
| 100 |
+
available_gpus: [6]
|
| 101 |
+
available_gpus: [6]
|
| 102 |
+
available_gpus: [6]
|
| 103 |
+
available_gpus: [6]
|
| 104 |
+
available_gpus: [6]
|
| 105 |
+
available_gpus: [6]
|
| 106 |
+
available_gpus: [6]
|
| 107 |
+
available_gpus: [6]
|
| 108 |
+
available_gpus: [6]
|
| 109 |
+
available_gpus: [4, 5, 6]
|
| 110 |
+
The following command is about to run:
|
| 111 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/51/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/51/bamboogle/inf.log 2>&1 &
|
| 112 |
+
available_gpus: [6]
|
| 113 |
+
available_gpus: [6]
|
| 114 |
+
available_gpus: [6]
|
| 115 |
+
available_gpus: [6]
|
| 116 |
+
available_gpus: [6]
|
| 117 |
+
available_gpus: [6]
|
| 118 |
+
available_gpus: [6]
|
| 119 |
+
available_gpus: [6]
|
| 120 |
+
available_gpus: [6]
|
| 121 |
+
available_gpus: [6]
|
| 122 |
+
available_gpus: [2, 3, 6]
|
| 123 |
+
The following command is about to run:
|
| 124 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/38/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-38 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/38/gaia/inf.log 2>&1 &
|
| 125 |
+
available_gpus: [6]
|
| 126 |
+
available_gpus: [6]
|
| 127 |
+
available_gpus: [6]
|
| 128 |
+
available_gpus: [6]
|
| 129 |
+
available_gpus: [6]
|
| 130 |
+
available_gpus: [4, 5, 6]
|
| 131 |
+
The following command is about to run:
|
| 132 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/38/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-38 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/38/bamboogle/inf.log 2>&1 &
|
| 133 |
+
available_gpus: [6]
|
| 134 |
+
available_gpus: [6]
|
| 135 |
+
available_gpus: [6]
|
| 136 |
+
available_gpus: [6]
|
| 137 |
+
available_gpus: [6]
|
| 138 |
+
available_gpus: [6]
|
| 139 |
+
available_gpus: [6]
|
| 140 |
+
available_gpus: [6]
|
| 141 |
+
available_gpus: [6]
|
| 142 |
+
available_gpus: [6]
|
| 143 |
+
available_gpus: [6]
|
| 144 |
+
available_gpus: [6]
|
| 145 |
+
available_gpus: [6]
|
| 146 |
+
available_gpus: [6]
|
| 147 |
+
available_gpus: [6]
|
| 148 |
+
available_gpus: [6]
|
| 149 |
+
available_gpus: [6]
|
| 150 |
+
available_gpus: [6]
|
| 151 |
+
available_gpus: [6]
|
| 152 |
+
available_gpus: [5, 6]
|
| 153 |
+
The following command is about to run:
|
| 154 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,6 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/25/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-25 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/25/gaia/inf.log 2>&1 &
|
| 155 |
+
available_gpus: [4]
|
| 156 |
+
available_gpus: [4]
|
| 157 |
+
available_gpus: [4]
|
| 158 |
+
available_gpus: [4]
|
| 159 |
+
available_gpus: [4]
|
| 160 |
+
available_gpus: [4]
|
| 161 |
+
available_gpus: [4]
|
| 162 |
+
available_gpus: [4]
|
| 163 |
+
available_gpus: [4]
|
| 164 |
+
available_gpus: [4]
|
| 165 |
+
available_gpus: [4]
|
| 166 |
+
available_gpus: [4]
|
| 167 |
+
available_gpus: [4]
|
| 168 |
+
available_gpus: [4]
|
| 169 |
+
available_gpus: [4]
|
| 170 |
+
available_gpus: [2, 3, 4]
|
| 171 |
+
The following command is about to run:
|
| 172 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/25/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/checkpoint-25 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10634#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_ques_yiwenci_filtered_data_811_ablation/25/bamboogle/inf.log 2>&1 &
|
| 173 |
+
Wish me good luck!
|
deep_search/sft/5-5_ablation_resp_format_1064_random_sample_871_Qwen2.5-7B-Instruct.log
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [78, 68, 55, 41, 27]
|
| 2 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-78) to exist...
|
| 3 |
+
The checkpoint exists. Waiting for running...
|
| 4 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/78/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/78/gaia/inf.log 2>&1 &
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/78/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/78/bamboogle/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-68) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/68/gaia/inf.log 2>&1 &
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/68/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/68/bamboogle/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-55) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/55/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/55/gaia/inf.log 2>&1 &
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/55/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/55/bamboogle/inf.log 2>&1 &
|
| 14 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-41) to exist...
|
| 15 |
+
The checkpoint exists. Waiting for running...
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/41/gaia/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/41/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/41/bamboogle/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-27) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/27/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/27/gaia/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/27/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/27/bamboogle/inf.log 2>&1 &
|
| 22 |
+
All checkpoints exist. Wait for runing...
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: [6, 7]
|
| 42 |
+
The following command is about to run:
|
| 43 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/78/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/78/gaia/inf.log 2>&1 &
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: [6, 7]
|
| 46 |
+
The following command is about to run:
|
| 47 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/78/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/78/bamboogle/inf.log 2>&1 &
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: [2, 3]
|
| 50 |
+
The following command is about to run:
|
| 51 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/68/gaia/inf.log 2>&1 &
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: [4, 5]
|
| 56 |
+
The following command is about to run:
|
| 57 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/68/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30702#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_resp_format_1064_random_sample_871_ablation/68/bamboogle/inf.log 2>&1 &
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
deep_search/sft/ds_zero3_offload.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bf16": {
|
| 3 |
+
"enabled": "auto"
|
| 4 |
+
},
|
| 5 |
+
"zero_optimization": {
|
| 6 |
+
"stage": 3,
|
| 7 |
+
"offload_optimizer": {
|
| 8 |
+
"device": "cpu",
|
| 9 |
+
"pin_memory": true
|
| 10 |
+
},
|
| 11 |
+
"offload_param": {
|
| 12 |
+
"device": "cpu",
|
| 13 |
+
"pin_memory": true
|
| 14 |
+
},
|
| 15 |
+
"overlap_comm": true,
|
| 16 |
+
"contiguous_gradients": true,
|
| 17 |
+
"sub_group_size": 5E8,
|
| 18 |
+
"reduce_bucket_size": "auto",
|
| 19 |
+
"stage3_prefetch_bucket_size": "auto",
|
| 20 |
+
"stage3_param_persistence_threshold": "auto",
|
| 21 |
+
"stage3_max_live_parameters": 5E8,
|
| 22 |
+
"stage3_max_reuse_distance": 5E8,
|
| 23 |
+
"stage3_gather_16bit_weights_on_model_save": true
|
| 24 |
+
},
|
| 25 |
+
"gradient_accumulation_steps": "auto",
|
| 26 |
+
"gradient_clipping": "auto",
|
| 27 |
+
"steps_per_print": 2000,
|
| 28 |
+
"train_batch_size": "auto",
|
| 29 |
+
"train_micro_batch_size_per_gpu": "auto",
|
| 30 |
+
"wall_clock_breakdown": false,
|
| 31 |
+
"flops_profiler": {
|
| 32 |
+
"enabled": false,
|
| 33 |
+
"profile_step": 10,
|
| 34 |
+
"module_depth": -1,
|
| 35 |
+
"top_modules": 3,
|
| 36 |
+
"detailed": true,
|
| 37 |
+
"output_file": "flops_profiler.out"
|
| 38 |
+
}
|
| 39 |
+
}
|
deep_search/sft/gen_data_2.log
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Traceback (most recent call last):
|
| 2 |
+
File "/opt/aps/workdir/sunshuang/deep_search/search_o1/gen_data.py", line 4, in <module>
|
| 3 |
+
from vllm import LLM, SamplingParams
|
| 4 |
+
ModuleNotFoundError: No module named 'vllm'
|
deep_search/sft/hostfile
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
lmlabide-824b9f7c-6967-4403-8cf6-8911048f903a-worker-0 slots=8
|
| 2 |
+
lmlabide-824b9f7c-6967-4403-8cf6-8911048f903a-worker-1 slots=8
|
deep_search/sft/mix.sh
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# export CUDA_VISIBLE_DEVICES=0,1,2,3
|
| 6 |
+
# --include localhost:0,1,2,3,4,5,6,7 \
|
| 7 |
+
|
| 8 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 9 |
+
# --master_port=9944 \
|
| 10 |
+
# --include localhost:4,5,6,7 \
|
| 11 |
+
# sft.py \
|
| 12 |
+
# --deepspeed ds_zero3.json \
|
| 13 |
+
# --model_name_or_path /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 14 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-1.5B \
|
| 15 |
+
# --do_train \
|
| 16 |
+
# --save_safetensors true \
|
| 17 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/merged_selected_data.json \
|
| 18 |
+
# --lr_scheduler_type cosine \
|
| 19 |
+
# --output_dir output/checkpoint/qwen_32B_test \
|
| 20 |
+
# --overwrite_output_dir \
|
| 21 |
+
# --warmup_ratio 0.03 \
|
| 22 |
+
# --gradient_checkpointing true \
|
| 23 |
+
# --per_device_train_batch_size 2 \
|
| 24 |
+
# --gradient_accumulation_steps 2 \
|
| 25 |
+
# --logging_steps 1 \
|
| 26 |
+
# --learning_rate 2e-5 \
|
| 27 |
+
# --num_train_epochs 2 \
|
| 28 |
+
# --save_steps 400 \
|
| 29 |
+
# --model_max_length 8192 \
|
| 30 |
+
# --save_total_limit 16 \
|
| 31 |
+
# --bf16 || exit 1
|
| 32 |
+
|
| 33 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 34 |
+
# --master_port=9944 \
|
| 35 |
+
# sft.py \
|
| 36 |
+
# --deepspeed ds_zero3.json \
|
| 37 |
+
# --model_name_or_path /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 38 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 39 |
+
# --do_train \
|
| 40 |
+
# --save_safetensors true \
|
| 41 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 42 |
+
# --lr_scheduler_type cosine \
|
| 43 |
+
# --output_dir output/checkpoint/qwen_2_5_32b_data_1217 \
|
| 44 |
+
# --overwrite_output_dir \
|
| 45 |
+
# --warmup_ratio 0.03 \
|
| 46 |
+
# --gradient_checkpointing true \
|
| 47 |
+
# --per_device_train_batch_size 1 \
|
| 48 |
+
# --gradient_accumulation_steps 4 \
|
| 49 |
+
# --logging_steps 1 \
|
| 50 |
+
# --learning_rate 2e-5 \
|
| 51 |
+
# --num_train_epochs 1 \
|
| 52 |
+
# --save_steps 400 \
|
| 53 |
+
# --model_max_length 8192 \
|
| 54 |
+
# --save_total_limit 16 \
|
| 55 |
+
# --bf16 || exit 1
|
| 56 |
+
|
| 57 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 58 |
+
# --master_port=9944 \
|
| 59 |
+
# --include localhost:4,5,6,7 \
|
| 60 |
+
# sft.py \
|
| 61 |
+
# --deepspeed ds_zero3.json \
|
| 62 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 63 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 64 |
+
# --do_train \
|
| 65 |
+
# --save_safetensors true \
|
| 66 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 67 |
+
# --lr_scheduler_type cosine \
|
| 68 |
+
# --output_dir output/checkpoint/qwen_7b_inst_data_1217 \
|
| 69 |
+
# --overwrite_output_dir \
|
| 70 |
+
# --warmup_ratio 0.03 \
|
| 71 |
+
# --gradient_checkpointing true \
|
| 72 |
+
# --per_device_train_batch_size 1 \
|
| 73 |
+
# --gradient_accumulation_steps 4 \
|
| 74 |
+
# --logging_steps 1 \
|
| 75 |
+
# --learning_rate 2e-5 \
|
| 76 |
+
# --num_train_epochs 1 \
|
| 77 |
+
# --save_steps 400 \
|
| 78 |
+
# --model_max_length 8192 \
|
| 79 |
+
# --save_total_limit 16 \
|
| 80 |
+
# --bf16 || exit 1
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# qwen 7b 用自己的tokenizer
|
| 84 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 85 |
+
# --master_port=9944 \
|
| 86 |
+
# --include localhost:6,7 \
|
| 87 |
+
# sft.py \
|
| 88 |
+
# --deepspeed ds_zero3.json \
|
| 89 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 90 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 91 |
+
# --do_train \
|
| 92 |
+
# --save_safetensors true \
|
| 93 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 94 |
+
# --lr_scheduler_type cosine \
|
| 95 |
+
# --output_dir output/checkpoint/qwen_7b_original_tokenizer_inst_data_1217 \
|
| 96 |
+
# --overwrite_output_dir \
|
| 97 |
+
# --warmup_ratio 0.03 \
|
| 98 |
+
# --gradient_checkpointing true \
|
| 99 |
+
# --per_device_train_batch_size 1 \
|
| 100 |
+
# --gradient_accumulation_steps 4 \
|
| 101 |
+
# --logging_steps 1 \
|
| 102 |
+
# --learning_rate 2e-5 \
|
| 103 |
+
# --num_train_epochs 1 \
|
| 104 |
+
# --save_steps 400 \
|
| 105 |
+
# --model_max_length 8192 \
|
| 106 |
+
# --save_total_limit 16 \
|
| 107 |
+
# --bf16 || exit 1
|
| 108 |
+
|
| 109 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 110 |
+
# --master_port=9944 \
|
| 111 |
+
# --include localhost:7 \
|
| 112 |
+
# sft_1.py \
|
| 113 |
+
# --deepspeed ds_zero3.json \
|
| 114 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 115 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 116 |
+
# --do_train \
|
| 117 |
+
# --save_safetensors true \
|
| 118 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 119 |
+
# --lr_scheduler_type cosine \
|
| 120 |
+
# --output_dir output/checkpoint/qwen_7b_inst_data_1217 \
|
| 121 |
+
# --overwrite_output_dir \
|
| 122 |
+
# --warmup_ratio 0.03 \
|
| 123 |
+
# --gradient_checkpointing true \
|
| 124 |
+
# --per_device_train_batch_size 1 \
|
| 125 |
+
# --gradient_accumulation_steps 4 \
|
| 126 |
+
# --logging_steps 1 \
|
| 127 |
+
# --learning_rate 2e-5 \
|
| 128 |
+
# --num_train_epochs 1 \
|
| 129 |
+
# --save_steps 400 \
|
| 130 |
+
# --model_max_length 8192 \
|
| 131 |
+
# --save_total_limit 16 \
|
| 132 |
+
# --bf16 || exit 1
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
# qwen 7b 用自己的tokenizer
|
| 137 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 138 |
+
# --master_port=9944 \
|
| 139 |
+
# --include localhost:6,7 \
|
| 140 |
+
# sft.py \
|
| 141 |
+
# --deepspeed ds_zero3.json \
|
| 142 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 143 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 144 |
+
# --do_train \
|
| 145 |
+
# --save_safetensors true \
|
| 146 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 147 |
+
# --lr_scheduler_type cosine \
|
| 148 |
+
# --output_dir output/sft_use_original_tokenizer/qwen_7b_original_tokenizer_inst_data_1217_1 \
|
| 149 |
+
# --overwrite_output_dir \
|
| 150 |
+
# --warmup_ratio 0.03 \
|
| 151 |
+
# --gradient_checkpointing true \
|
| 152 |
+
# --per_device_train_batch_size 1 \
|
| 153 |
+
# --gradient_accumulation_steps 4 \
|
| 154 |
+
# --logging_steps 1 \
|
| 155 |
+
# --learning_rate 2e-5 \
|
| 156 |
+
# --num_train_epochs 1 \
|
| 157 |
+
# --model_max_length 8192 \
|
| 158 |
+
# --save_total_limit 16 \
|
| 159 |
+
# --bf16 || exit 1
|
| 160 |
+
# 定义参数
|
| 161 |
+
lr=1e-5
|
| 162 |
+
base=QwQ-32B
|
| 163 |
+
tokenizer=QwQ-32B
|
| 164 |
+
# train_data=hopotqa_1217.json
|
| 165 |
+
train_data=new_instruction_2k_sft
|
| 166 |
+
bsz=2
|
| 167 |
+
acc=4
|
| 168 |
+
|
| 169 |
+
# 生成随机 JOB-ID
|
| 170 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 171 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 172 |
+
|
| 173 |
+
# 输出路径
|
| 174 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 175 |
+
|
| 176 |
+
output_dir_1=${output_dir}
|
| 177 |
+
model_name_1=${base}
|
| 178 |
+
# 创建输出目录
|
| 179 |
+
mkdir -p "$output_dir"
|
| 180 |
+
|
| 181 |
+
echo ${output_dir}
|
| 182 |
+
|
| 183 |
+
# 执行 deepspeed 命令
|
| 184 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 185 |
+
--master_port=9944 \
|
| 186 |
+
sft_1.py \
|
| 187 |
+
--deepspeed ds_zero3_offload.json \
|
| 188 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 189 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 190 |
+
--do_train \
|
| 191 |
+
--save_safetensors true \
|
| 192 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 193 |
+
--lr_scheduler_type cosine \
|
| 194 |
+
--output_dir "$output_dir" \
|
| 195 |
+
--overwrite_output_dir \
|
| 196 |
+
--warmup_ratio 0.03 \
|
| 197 |
+
--gradient_checkpointing true \
|
| 198 |
+
--per_device_train_batch_size "$bsz" \
|
| 199 |
+
--gradient_accumulation_steps "$acc" \
|
| 200 |
+
--logging_steps 1 \
|
| 201 |
+
--learning_rate "$lr" \
|
| 202 |
+
--num_train_epochs 6 \
|
| 203 |
+
--save_strategy epoch \
|
| 204 |
+
--save_only_model true \
|
| 205 |
+
--model_max_length 30000 \
|
| 206 |
+
--save_total_limit 15 \
|
| 207 |
+
--bf16 || exit 1
|
| 208 |
+
|
| 209 |
+
# 3-15 model_max_length 25000 -> 30000
|
| 210 |
+
#################################################
|
| 211 |
+
lr=1e-5
|
| 212 |
+
base=DeepSeek-R1-Distill-Qwen-32B
|
| 213 |
+
tokenizer=DeepSeek-R1-Distill-Qwen-32B
|
| 214 |
+
# train_data=hopotqa_1217.json
|
| 215 |
+
train_data=new_instruction_2k_sft
|
| 216 |
+
bsz=2
|
| 217 |
+
acc=4
|
| 218 |
+
|
| 219 |
+
# 生成随机 JOB-ID
|
| 220 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 221 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 222 |
+
|
| 223 |
+
# 输出路径
|
| 224 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 225 |
+
|
| 226 |
+
output_dir_2=${output_dir}
|
| 227 |
+
model_name_2=${base}
|
| 228 |
+
|
| 229 |
+
# 创建输出目录
|
| 230 |
+
mkdir -p "$output_dir"
|
| 231 |
+
|
| 232 |
+
echo ${output_dir}
|
| 233 |
+
|
| 234 |
+
# 执行 deepspeed 命令
|
| 235 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 236 |
+
--master_port=9944 \
|
| 237 |
+
sft_1.py \
|
| 238 |
+
--deepspeed ds_zero3_offload.json \
|
| 239 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 240 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 241 |
+
--do_train \
|
| 242 |
+
--save_safetensors true \
|
| 243 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 244 |
+
--lr_scheduler_type cosine \
|
| 245 |
+
--output_dir "$output_dir" \
|
| 246 |
+
--overwrite_output_dir \
|
| 247 |
+
--warmup_ratio 0.03 \
|
| 248 |
+
--gradient_checkpointing true \
|
| 249 |
+
--per_device_train_batch_size "$bsz" \
|
| 250 |
+
--gradient_accumulation_steps "$acc" \
|
| 251 |
+
--logging_steps 1 \
|
| 252 |
+
--learning_rate "$lr" \
|
| 253 |
+
--num_train_epochs 6 \
|
| 254 |
+
--save_strategy epoch \
|
| 255 |
+
--save_only_model true \
|
| 256 |
+
--model_max_length 30000 \
|
| 257 |
+
--save_total_limit 15 \
|
| 258 |
+
--bf16 || exit 1
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
bash test.sh $output_dir_1 $model_name_1
|
| 262 |
+
|
| 263 |
+
bash test.sh $output_dir_2 $model_name_2
|
| 264 |
+
|
| 265 |
+
|
deep_search/sft/mix_2.sh
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
|
| 6 |
+
###########################
|
| 7 |
+
# 定义参数
|
| 8 |
+
lr=1e-5
|
| 9 |
+
base=Qwen2.5-7B-Instruct
|
| 10 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 11 |
+
# train_data=hopotqa_1217.json
|
| 12 |
+
train_data=ablation_subquery_1073_random_sample_871
|
| 13 |
+
bsz=2
|
| 14 |
+
acc=4
|
| 15 |
+
|
| 16 |
+
# 生成随机 JOB-ID
|
| 17 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 18 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_ablation"
|
| 19 |
+
|
| 20 |
+
# 输出路径
|
| 21 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 22 |
+
|
| 23 |
+
output_dir_1=${output_dir}
|
| 24 |
+
model_name_1=${base}
|
| 25 |
+
dataset_1=${train_data}
|
| 26 |
+
# 创建输出目录
|
| 27 |
+
mkdir -p "$output_dir"
|
| 28 |
+
|
| 29 |
+
echo ${output_dir}
|
| 30 |
+
|
| 31 |
+
# 执行 deepspeed 命令
|
| 32 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 33 |
+
--master_port=9944 \
|
| 34 |
+
sft_1.py \
|
| 35 |
+
--deepspeed ds_zero3_offload.json \
|
| 36 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 37 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 38 |
+
--do_train \
|
| 39 |
+
--save_safetensors true \
|
| 40 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 41 |
+
--lr_scheduler_type cosine \
|
| 42 |
+
--output_dir "$output_dir" \
|
| 43 |
+
--overwrite_output_dir \
|
| 44 |
+
--warmup_ratio 0.03 \
|
| 45 |
+
--gradient_checkpointing true \
|
| 46 |
+
--per_device_train_batch_size "$bsz" \
|
| 47 |
+
--gradient_accumulation_steps "$acc" \
|
| 48 |
+
--logging_steps 1 \
|
| 49 |
+
--learning_rate "$lr" \
|
| 50 |
+
--num_train_epochs 6 \
|
| 51 |
+
--save_strategy epoch \
|
| 52 |
+
--save_only_model true \
|
| 53 |
+
--model_max_length 30000 \
|
| 54 |
+
--save_total_limit 5 \
|
| 55 |
+
--bf16 || exit 1
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
########################### 2
|
| 59 |
+
# 定义参数
|
| 60 |
+
lr=1e-5
|
| 61 |
+
base=Qwen2.5-7B-Instruct
|
| 62 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 63 |
+
# train_data=hopotqa_1217.json
|
| 64 |
+
train_data=ablation_resp_format_1064_random_sample_871
|
| 65 |
+
bsz=2
|
| 66 |
+
acc=4
|
| 67 |
+
|
| 68 |
+
# 生成随机 JOB-ID
|
| 69 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 70 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_ablation"
|
| 71 |
+
|
| 72 |
+
# 输出路径
|
| 73 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 74 |
+
|
| 75 |
+
output_dir_2=${output_dir}
|
| 76 |
+
model_name_2=${base}
|
| 77 |
+
dataset_2=${train_data}
|
| 78 |
+
# 创建输出目录
|
| 79 |
+
mkdir -p "$output_dir"
|
| 80 |
+
|
| 81 |
+
echo ${output_dir}
|
| 82 |
+
|
| 83 |
+
# 执行 deepspeed 命令
|
| 84 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 85 |
+
--master_port=9944 \
|
| 86 |
+
sft_1.py \
|
| 87 |
+
--deepspeed ds_zero3_offload.json \
|
| 88 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 89 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 90 |
+
--do_train \
|
| 91 |
+
--save_safetensors true \
|
| 92 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 93 |
+
--lr_scheduler_type cosine \
|
| 94 |
+
--output_dir "$output_dir" \
|
| 95 |
+
--overwrite_output_dir \
|
| 96 |
+
--warmup_ratio 0.03 \
|
| 97 |
+
--gradient_checkpointing true \
|
| 98 |
+
--per_device_train_batch_size "$bsz" \
|
| 99 |
+
--gradient_accumulation_steps "$acc" \
|
| 100 |
+
--logging_steps 1 \
|
| 101 |
+
--learning_rate "$lr" \
|
| 102 |
+
--num_train_epochs 6 \
|
| 103 |
+
--save_strategy epoch \
|
| 104 |
+
--save_only_model true \
|
| 105 |
+
--model_max_length 30000 \
|
| 106 |
+
--save_total_limit 5 \
|
| 107 |
+
--bf16 || exit 1
|
| 108 |
+
|
| 109 |
+
########################### 3
|
| 110 |
+
# 定义参数
|
| 111 |
+
lr=1e-5
|
| 112 |
+
base=Qwen2.5-7B-Instruct
|
| 113 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 114 |
+
# train_data=hopotqa_1217.json
|
| 115 |
+
train_data=ablation_ques_domain_filtered_data_738
|
| 116 |
+
bsz=2
|
| 117 |
+
acc=4
|
| 118 |
+
|
| 119 |
+
# 生成随机 JOB-ID
|
| 120 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 121 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_ablation"
|
| 122 |
+
|
| 123 |
+
# 输出路径
|
| 124 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 125 |
+
|
| 126 |
+
output_dir_3=${output_dir}
|
| 127 |
+
model_name_3=${base}
|
| 128 |
+
dataset_3=${train_data}
|
| 129 |
+
# 创建输出目录
|
| 130 |
+
mkdir -p "$output_dir"
|
| 131 |
+
|
| 132 |
+
echo ${output_dir}
|
| 133 |
+
|
| 134 |
+
# 执行 deepspeed 命令
|
| 135 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 136 |
+
--master_port=9944 \
|
| 137 |
+
sft_1.py \
|
| 138 |
+
--deepspeed ds_zero3_offload.json \
|
| 139 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 140 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 141 |
+
--do_train \
|
| 142 |
+
--save_safetensors true \
|
| 143 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 144 |
+
--lr_scheduler_type cosine \
|
| 145 |
+
--output_dir "$output_dir" \
|
| 146 |
+
--overwrite_output_dir \
|
| 147 |
+
--warmup_ratio 0.03 \
|
| 148 |
+
--gradient_checkpointing true \
|
| 149 |
+
--per_device_train_batch_size "$bsz" \
|
| 150 |
+
--gradient_accumulation_steps "$acc" \
|
| 151 |
+
--logging_steps 1 \
|
| 152 |
+
--learning_rate "$lr" \
|
| 153 |
+
--num_train_epochs 6 \
|
| 154 |
+
--save_strategy epoch \
|
| 155 |
+
--save_only_model true \
|
| 156 |
+
--model_max_length 30000 \
|
| 157 |
+
--save_total_limit 5 \
|
| 158 |
+
--bf16 || exit 1
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
########################### 4
|
| 164 |
+
# 定义参数
|
| 165 |
+
lr=1e-5
|
| 166 |
+
base=Qwen2.5-7B-Instruct
|
| 167 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 168 |
+
# train_data=hopotqa_1217.json
|
| 169 |
+
train_data=ablation_ques_keywords_filtered_data_727
|
| 170 |
+
bsz=2
|
| 171 |
+
acc=4
|
| 172 |
+
|
| 173 |
+
# 生成随机 JOB-ID
|
| 174 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 175 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_ablation"
|
| 176 |
+
|
| 177 |
+
# 输出路径
|
| 178 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 179 |
+
|
| 180 |
+
output_dir_4=${output_dir}
|
| 181 |
+
model_name_4=${base}
|
| 182 |
+
dataset_4=${train_data}
|
| 183 |
+
# 创建输出目录
|
| 184 |
+
mkdir -p "$output_dir"
|
| 185 |
+
|
| 186 |
+
echo ${output_dir}
|
| 187 |
+
|
| 188 |
+
# 执行 deepspeed 命令
|
| 189 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 190 |
+
--master_port=9944 \
|
| 191 |
+
sft_1.py \
|
| 192 |
+
--deepspeed ds_zero3_offload.json \
|
| 193 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 194 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 195 |
+
--do_train \
|
| 196 |
+
--save_safetensors true \
|
| 197 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 198 |
+
--lr_scheduler_type cosine \
|
| 199 |
+
--output_dir "$output_dir" \
|
| 200 |
+
--overwrite_output_dir \
|
| 201 |
+
--warmup_ratio 0.03 \
|
| 202 |
+
--gradient_checkpointing true \
|
| 203 |
+
--per_device_train_batch_size "$bsz" \
|
| 204 |
+
--gradient_accumulation_steps "$acc" \
|
| 205 |
+
--logging_steps 1 \
|
| 206 |
+
--learning_rate "$lr" \
|
| 207 |
+
--num_train_epochs 6 \
|
| 208 |
+
--save_strategy epoch \
|
| 209 |
+
--save_only_model true \
|
| 210 |
+
--model_max_length 30000 \
|
| 211 |
+
--save_total_limit 5 \
|
| 212 |
+
--bf16 || exit 1
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
########################### 5
|
| 218 |
+
# 定义参数
|
| 219 |
+
lr=1e-5
|
| 220 |
+
base=Qwen2.5-7B-Instruct
|
| 221 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 222 |
+
# train_data=hopotqa_1217.json
|
| 223 |
+
train_data=ablation_ques_yiwenci_filtered_data_811
|
| 224 |
+
bsz=2
|
| 225 |
+
acc=4
|
| 226 |
+
|
| 227 |
+
# 生成随机 JOB-ID
|
| 228 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 229 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_ablation"
|
| 230 |
+
|
| 231 |
+
# 输出路径
|
| 232 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 233 |
+
|
| 234 |
+
output_dir_5=${output_dir}
|
| 235 |
+
model_name_5=${base}
|
| 236 |
+
dataset_5=${train_data}
|
| 237 |
+
# 创建输出目录
|
| 238 |
+
mkdir -p "$output_dir"
|
| 239 |
+
|
| 240 |
+
echo ${output_dir}
|
| 241 |
+
|
| 242 |
+
# 执行 deepspeed 命令
|
| 243 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 244 |
+
--master_port=9944 \
|
| 245 |
+
sft_1.py \
|
| 246 |
+
--deepspeed ds_zero3_offload.json \
|
| 247 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 248 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 249 |
+
--do_train \
|
| 250 |
+
--save_safetensors true \
|
| 251 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 252 |
+
--lr_scheduler_type cosine \
|
| 253 |
+
--output_dir "$output_dir" \
|
| 254 |
+
--overwrite_output_dir \
|
| 255 |
+
--warmup_ratio 0.03 \
|
| 256 |
+
--gradient_checkpointing true \
|
| 257 |
+
--per_device_train_batch_size "$bsz" \
|
| 258 |
+
--gradient_accumulation_steps "$acc" \
|
| 259 |
+
--logging_steps 1 \
|
| 260 |
+
--learning_rate "$lr" \
|
| 261 |
+
--num_train_epochs 6 \
|
| 262 |
+
--save_strategy epoch \
|
| 263 |
+
--save_only_model true \
|
| 264 |
+
--model_max_length 30000 \
|
| 265 |
+
--save_total_limit 5 \
|
| 266 |
+
--bf16 || exit 1
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
###################################
|
| 270 |
+
# 测试
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
bash test_two_model_qwq.sh $output_dir_1 $model_name_1 $dataset_1
|
| 274 |
+
|
| 275 |
+
sleep 7200
|
| 276 |
+
bash test_two_model_qwq_1.sh $output_dir_2 $model_name_2 $dataset_2
|
| 277 |
+
|
| 278 |
+
sleep 7200
|
| 279 |
+
bash test_two_model_qwq_1.sh $output_dir_3 $model_name_3 $dataset_3
|
| 280 |
+
|
| 281 |
+
sleep 7200
|
| 282 |
+
bash test_two_model_qwq_1.sh $output_dir_4 $model_name_4 $dataset_4
|
| 283 |
+
|
| 284 |
+
sleep 7200
|
| 285 |
+
bash test_two_model_qwq_1.sh $output_dir_5 $model_name_5 $dataset_5
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
nohup python /opt/aps/workdir/sunshuang/deep_search/search_o1/gen_data.py \
|
| 289 |
+
--input_file_path "/opt/aps/workdir/sunshuang/deep_search/data_syn/data/mixed_data/mixed_data_all.json" \
|
| 290 |
+
--cuda_visible_devices "0,1,2,3" > gen_data_1.log 2>&1 &
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
nohup python /opt/aps/workdir/sunshuang/deep_search/search_o1/gen_data.py \
|
| 294 |
+
--input_file_path "/opt/aps/workdir/sunshuang/deep_search/data_syn/data/mixed_data/mixed_data_all.json" \
|
| 295 |
+
--cuda_visible_devices "4,5,6,7" > gen_data_2.log 2>&1 &
|
deep_search/sft/mix_2_1.sh
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
|
| 6 |
+
# ###########################
|
| 7 |
+
# # 定义参数
|
| 8 |
+
# lr=1e-5
|
| 9 |
+
# base=Qwen2.5-7B-Instruct
|
| 10 |
+
# tokenizer=Qwen2.5-7B-Instruct
|
| 11 |
+
# # train_data=hopotqa_1217.json
|
| 12 |
+
# train_data=ablation_resp_difficulty_1678_random_sample_871
|
| 13 |
+
# bsz=2
|
| 14 |
+
# acc=4
|
| 15 |
+
|
| 16 |
+
# # 生成随机 JOB-ID
|
| 17 |
+
# JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 18 |
+
# save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_ablation"
|
| 19 |
+
|
| 20 |
+
# # 输出路径
|
| 21 |
+
# output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 22 |
+
|
| 23 |
+
# output_dir_1=${output_dir}
|
| 24 |
+
# model_name_1=${base}
|
| 25 |
+
# dataset_1=${train_data}
|
| 26 |
+
# # 创建输出目录
|
| 27 |
+
# mkdir -p "$output_dir"
|
| 28 |
+
|
| 29 |
+
# echo ${output_dir}
|
| 30 |
+
|
| 31 |
+
# # 执行 deepspeed 命令
|
| 32 |
+
# /opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 33 |
+
# --master_port=9944 \
|
| 34 |
+
# sft_1.py \
|
| 35 |
+
# --deepspeed ds_zero3_offload.json \
|
| 36 |
+
# --model_name_or_path "/capacity/userdata/models/${base}" \
|
| 37 |
+
# --tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 38 |
+
# --do_train \
|
| 39 |
+
# --save_safetensors true \
|
| 40 |
+
# --data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 41 |
+
# --lr_scheduler_type cosine \
|
| 42 |
+
# --output_dir "$output_dir" \
|
| 43 |
+
# --overwrite_output_dir \
|
| 44 |
+
# --warmup_ratio 0.03 \
|
| 45 |
+
# --gradient_checkpointing true \
|
| 46 |
+
# --per_device_train_batch_size "$bsz" \
|
| 47 |
+
# --gradient_accumulation_steps "$acc" \
|
| 48 |
+
# --logging_steps 1 \
|
| 49 |
+
# --learning_rate "$lr" \
|
| 50 |
+
# --num_train_epochs 6 \
|
| 51 |
+
# --save_strategy epoch \
|
| 52 |
+
# --save_only_model true \
|
| 53 |
+
# --model_max_length 30000 \
|
| 54 |
+
# --save_total_limit 2 \
|
| 55 |
+
# --bf16 || exit 1
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
########################### 2
|
| 59 |
+
# 定义参数
|
| 60 |
+
lr=1e-5
|
| 61 |
+
base=Qwen2.5-7B-Instruct
|
| 62 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 63 |
+
# train_data=hopotqa_1217.json
|
| 64 |
+
train_data=ablation_resp_format_1064_random_sample_871
|
| 65 |
+
bsz=2
|
| 66 |
+
acc=4
|
| 67 |
+
|
| 68 |
+
# 生成随机 JOB-ID
|
| 69 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 70 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_ablation"
|
| 71 |
+
|
| 72 |
+
# 输出路径
|
| 73 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 74 |
+
|
| 75 |
+
output_dir_2=${output_dir}
|
| 76 |
+
model_name_2=${base}
|
| 77 |
+
dataset_2=${train_data}
|
| 78 |
+
# 创建输出目录
|
| 79 |
+
mkdir -p "$output_dir"
|
| 80 |
+
|
| 81 |
+
echo ${output_dir}
|
| 82 |
+
|
| 83 |
+
# 执行 deepspeed 命令
|
| 84 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 85 |
+
--master_port=9944 \
|
| 86 |
+
sft_1.py \
|
| 87 |
+
--deepspeed ds_zero3_offload.json \
|
| 88 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 89 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 90 |
+
--do_train \
|
| 91 |
+
--save_safetensors true \
|
| 92 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 93 |
+
--lr_scheduler_type cosine \
|
| 94 |
+
--output_dir "$output_dir" \
|
| 95 |
+
--overwrite_output_dir \
|
| 96 |
+
--warmup_ratio 0.03 \
|
| 97 |
+
--gradient_checkpointing true \
|
| 98 |
+
--per_device_train_batch_size "$bsz" \
|
| 99 |
+
--gradient_accumulation_steps "$acc" \
|
| 100 |
+
--logging_steps 1 \
|
| 101 |
+
--learning_rate "$lr" \
|
| 102 |
+
--num_train_epochs 6 \
|
| 103 |
+
--save_strategy epoch \
|
| 104 |
+
--save_only_model true \
|
| 105 |
+
--model_max_length 30000 \
|
| 106 |
+
--save_total_limit 2 \
|
| 107 |
+
--bf16 || exit 1
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# #### 3
|
| 112 |
+
# lr=1e-5
|
| 113 |
+
# base=Qwen2.5-7B-Instruct
|
| 114 |
+
# tokenizer=Qwen2.5-7B-Instruct
|
| 115 |
+
# # train_data=hopotqa_1217.json
|
| 116 |
+
# train_data=ablation_subquery_1073
|
| 117 |
+
# bsz=2
|
| 118 |
+
# acc=4
|
| 119 |
+
|
| 120 |
+
# # 生成随机 JOB-ID
|
| 121 |
+
# JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 122 |
+
# save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_ablation"
|
| 123 |
+
|
| 124 |
+
# # 输出路径
|
| 125 |
+
# output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 126 |
+
|
| 127 |
+
# output_dir_1=${output_dir}
|
| 128 |
+
# model_name_1=${base}
|
| 129 |
+
# dataset_1=${train_data}
|
| 130 |
+
# # 创建输出目录
|
| 131 |
+
# mkdir -p "$output_dir"
|
| 132 |
+
|
| 133 |
+
# echo ${output_dir}
|
| 134 |
+
|
| 135 |
+
# # 执行 deepspeed 命令
|
| 136 |
+
# /opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 137 |
+
# --master_port=9944 \
|
| 138 |
+
# sft_1.py \
|
| 139 |
+
# --deepspeed ds_zero3_offload.json \
|
| 140 |
+
# --model_name_or_path "/capacity/userdata/models/${base}" \
|
| 141 |
+
# --tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 142 |
+
# --do_train \
|
| 143 |
+
# --save_safetensors true \
|
| 144 |
+
# --data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 145 |
+
# --lr_scheduler_type cosine \
|
| 146 |
+
# --output_dir "$output_dir" \
|
| 147 |
+
# --overwrite_output_dir \
|
| 148 |
+
# --warmup_ratio 0.03 \
|
| 149 |
+
# --gradient_checkpointing true \
|
| 150 |
+
# --per_device_train_batch_size "$bsz" \
|
| 151 |
+
# --gradient_accumulation_steps "$acc" \
|
| 152 |
+
# --logging_steps 1 \
|
| 153 |
+
# --learning_rate "$lr" \
|
| 154 |
+
# --num_train_epochs 6 \
|
| 155 |
+
# --save_strategy epoch \
|
| 156 |
+
# --save_only_model true \
|
| 157 |
+
# --model_max_length 30000 \
|
| 158 |
+
# --save_total_limit 2 \
|
| 159 |
+
# --bf16 || exit 1
|
| 160 |
+
|
| 161 |
+
###################################
|
| 162 |
+
# 测试
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
# bash test_two_model_qwq.sh $output_dir_1 $model_name_1 $dataset_1
|
| 166 |
+
|
| 167 |
+
# sleep 7200
|
| 168 |
+
# bash test_two_model_qwq.sh $output_dir_2 $model_name_2 $dataset_2
|
| 169 |
+
|
| 170 |
+
# sleep 600
|
| 171 |
+
# bash test.sh $output_dir_3 $model_name_3 $dataset_3
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
|
deep_search/sft/mix_math.sh
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# 定义参数
|
| 6 |
+
lr=1e-5
|
| 7 |
+
base=QwQ-32B
|
| 8 |
+
tokenizer=QwQ-32B
|
| 9 |
+
# train_data=hopotqa_1217.json
|
| 10 |
+
train_data=no_error_data_871
|
| 11 |
+
bsz=2
|
| 12 |
+
acc=4
|
| 13 |
+
|
| 14 |
+
# 生成随机 JOB-ID
|
| 15 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 16 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_mixed_math"
|
| 17 |
+
|
| 18 |
+
# 输出路径
|
| 19 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 20 |
+
|
| 21 |
+
output_dir_1=${output_dir}
|
| 22 |
+
model_name_1=${base}
|
| 23 |
+
dataset_1=${train_data}
|
| 24 |
+
# 创建输出目录
|
| 25 |
+
mkdir -p "$output_dir"
|
| 26 |
+
|
| 27 |
+
echo ${output_dir}
|
| 28 |
+
|
| 29 |
+
# 执行 deepspeed 命令
|
| 30 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 31 |
+
--master_port=9944 \
|
| 32 |
+
sft_2_math.py \
|
| 33 |
+
--deepspeed ds_zero3_offload.json \
|
| 34 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 35 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 36 |
+
--do_train \
|
| 37 |
+
--save_safetensors true \
|
| 38 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 39 |
+
--lr_scheduler_type cosine \
|
| 40 |
+
--output_dir "$output_dir" \
|
| 41 |
+
--overwrite_output_dir \
|
| 42 |
+
--warmup_ratio 0.03 \
|
| 43 |
+
--gradient_checkpointing true \
|
| 44 |
+
--per_device_train_batch_size "$bsz" \
|
| 45 |
+
--gradient_accumulation_steps "$acc" \
|
| 46 |
+
--logging_steps 1 \
|
| 47 |
+
--learning_rate "$lr" \
|
| 48 |
+
--num_train_epochs 1 \
|
| 49 |
+
--save_strategy epoch \
|
| 50 |
+
--save_only_model true \
|
| 51 |
+
--model_max_length 30000 \
|
| 52 |
+
--save_total_limit 4 \
|
| 53 |
+
--bf16 || exit 1
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# bash test_two_model_qwq.sh $output_dir_1 $model_name_1 $dataset_1
|
| 57 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 58 |
+
# bash test.sh $output_dir_3 $model_name_3
|
| 59 |
+
|
| 60 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 61 |
+
|
| 62 |
+
|
deep_search/sft/mix_math_after_search.sh
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# 定义参数
|
| 6 |
+
lr=1e-5
|
| 7 |
+
base=qwen7b_sft_871_checkpoint-78
|
| 8 |
+
tokenizer=qwen7b_sft_871_checkpoint-78
|
| 9 |
+
# train_data=hopotqa_1217.json
|
| 10 |
+
train_data=math_qwq_4524_selected_add_prompt_871
|
| 11 |
+
bsz=2
|
| 12 |
+
acc=4
|
| 13 |
+
|
| 14 |
+
# 生成随机 JOB-ID
|
| 15 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 16 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 17 |
+
|
| 18 |
+
# 输出路径
|
| 19 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 20 |
+
|
| 21 |
+
output_dir_1=${output_dir}
|
| 22 |
+
model_name_1=${base}
|
| 23 |
+
dataset_1=${train_data}
|
| 24 |
+
# 创建输出目录
|
| 25 |
+
mkdir -p "$output_dir"
|
| 26 |
+
|
| 27 |
+
echo ${output_dir}
|
| 28 |
+
|
| 29 |
+
# 执行 deepspeed 命令
|
| 30 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 31 |
+
--master_port=9944 \
|
| 32 |
+
sft_2_math_after_search.py \
|
| 33 |
+
--deepspeed ds_zero3_offload.json \
|
| 34 |
+
--model_name_or_path "/capacity/userdata/ss/sft_search/qwen7b_sft_871_checkpoint-78" \
|
| 35 |
+
--tokenizer_name_or_path "/capacity/userdata/ss/sft_search/qwen7b_sft_871_checkpoint-78" \
|
| 36 |
+
--do_train \
|
| 37 |
+
--save_safetensors true \
|
| 38 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 39 |
+
--lr_scheduler_type cosine \
|
| 40 |
+
--output_dir "$output_dir" \
|
| 41 |
+
--overwrite_output_dir \
|
| 42 |
+
--warmup_ratio 0.03 \
|
| 43 |
+
--gradient_checkpointing true \
|
| 44 |
+
--per_device_train_batch_size "$bsz" \
|
| 45 |
+
--gradient_accumulation_steps "$acc" \
|
| 46 |
+
--logging_steps 1 \
|
| 47 |
+
--learning_rate "$lr" \
|
| 48 |
+
--num_train_epochs 6 \
|
| 49 |
+
--save_strategy epoch \
|
| 50 |
+
--save_only_model true \
|
| 51 |
+
--model_max_length 30000 \
|
| 52 |
+
--save_total_limit 4 \
|
| 53 |
+
--bf16 || exit 1
|
| 54 |
+
|
| 55 |
+
bash test_two_model.sh $output_dir_1 $model_name_1 $dataset_1
|
| 56 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 57 |
+
# bash test.sh $output_dir_3 $model_name_3
|
| 58 |
+
|
| 59 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 60 |
+
|
| 61 |
+
|
deep_search/sft/mix_math_multi_node_1.sh
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# 定义参数
|
| 6 |
+
lr=1e-5
|
| 7 |
+
base=QwQ-32B
|
| 8 |
+
tokenizer=QwQ-32B
|
| 9 |
+
# train_data=hopotqa_1217.json
|
| 10 |
+
train_data=no_error_data_871
|
| 11 |
+
bsz=1
|
| 12 |
+
acc=8
|
| 13 |
+
|
| 14 |
+
# 生成随机 JOB-ID
|
| 15 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 16 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_mixed_math"
|
| 17 |
+
|
| 18 |
+
# 输出路径
|
| 19 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 20 |
+
|
| 21 |
+
output_dir_1=${output_dir}
|
| 22 |
+
model_name_1=${base}
|
| 23 |
+
dataset_1=${train_data}
|
| 24 |
+
# 创建输出目录
|
| 25 |
+
mkdir -p "$output_dir"
|
| 26 |
+
|
| 27 |
+
echo ${output_dir}
|
| 28 |
+
|
| 29 |
+
# 执行 deepspeed 命令
|
| 30 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 31 |
+
--hostfile=hostfile \
|
| 32 |
+
--no_ssh \
|
| 33 |
+
--node_rank=1 \
|
| 34 |
+
--master_addr=172.19.164.116 \
|
| 35 |
+
--master_port=9944 \
|
| 36 |
+
sft_2_math.py \
|
| 37 |
+
--deepspeed ds_zero3_offload.json \
|
| 38 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 39 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 40 |
+
--do_train \
|
| 41 |
+
--save_safetensors true \
|
| 42 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 43 |
+
--lr_scheduler_type cosine \
|
| 44 |
+
--output_dir "$output_dir" \
|
| 45 |
+
--overwrite_output_dir \
|
| 46 |
+
--warmup_ratio 0.03 \
|
| 47 |
+
--gradient_checkpointing true \
|
| 48 |
+
--per_device_train_batch_size "$bsz" \
|
| 49 |
+
--gradient_accumulation_steps "$acc" \
|
| 50 |
+
--logging_steps 1 \
|
| 51 |
+
--learning_rate "$lr" \
|
| 52 |
+
--num_train_epochs 6 \
|
| 53 |
+
--save_strategy epoch \
|
| 54 |
+
--save_only_model true \
|
| 55 |
+
--model_max_length 30000 \
|
| 56 |
+
--save_total_limit 6 \
|
| 57 |
+
--bf16 || exit 1
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
bash test_two_model_qwq_1.sh $output_dir_1 $model_name_1 $dataset_1
|
| 61 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 62 |
+
# bash test.sh $output_dir_3 $model_name_3
|
| 63 |
+
|
| 64 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 65 |
+
|
| 66 |
+
|
deep_search/sft/mix_math_sht_new_prompt.sh
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# 定义参数
|
| 6 |
+
lr=1e-5
|
| 7 |
+
base=Qwen2.5-7B-Instruct
|
| 8 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 9 |
+
# train_data=hopotqa_1217.json
|
| 10 |
+
train_data=no_error_data_871_sht_new_prompt
|
| 11 |
+
bsz=2
|
| 12 |
+
acc=4
|
| 13 |
+
|
| 14 |
+
# 生成随机 JOB-ID
|
| 15 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 16 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_mixed_math_sht_new_prmopt"
|
| 17 |
+
|
| 18 |
+
# 输出路径
|
| 19 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 20 |
+
|
| 21 |
+
output_dir_1=${output_dir}
|
| 22 |
+
model_name_1=${base}
|
| 23 |
+
dataset_1=${train_data}
|
| 24 |
+
# 创建输出目录
|
| 25 |
+
mkdir -p "$output_dir"
|
| 26 |
+
|
| 27 |
+
echo ${output_dir}
|
| 28 |
+
|
| 29 |
+
# 执行 deepspeed 命令
|
| 30 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 31 |
+
--master_port=9944 \
|
| 32 |
+
sft_2_math_sht_new_prompt.py \
|
| 33 |
+
--deepspeed ds_zero3_offload.json \
|
| 34 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 35 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 36 |
+
--do_train \
|
| 37 |
+
--save_safetensors true \
|
| 38 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 39 |
+
--lr_scheduler_type cosine \
|
| 40 |
+
--output_dir "$output_dir" \
|
| 41 |
+
--overwrite_output_dir \
|
| 42 |
+
--warmup_ratio 0.03 \
|
| 43 |
+
--gradient_checkpointing true \
|
| 44 |
+
--per_device_train_batch_size "$bsz" \
|
| 45 |
+
--gradient_accumulation_steps "$acc" \
|
| 46 |
+
--logging_steps 1 \
|
| 47 |
+
--learning_rate "$lr" \
|
| 48 |
+
--num_train_epochs 5 \
|
| 49 |
+
--save_strategy epoch \
|
| 50 |
+
--save_only_model true \
|
| 51 |
+
--model_max_length 30000 \
|
| 52 |
+
--save_total_limit 3 \
|
| 53 |
+
--bf16 || exit 1
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# bash test_two_model_qwq.sh $output_dir_1 $model_name_1 $dataset_1
|
| 57 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 58 |
+
# bash test.sh $output_dir_3 $model_name_3
|
| 59 |
+
|
| 60 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 61 |
+
|
| 62 |
+
|
deep_search/sft/mix_wo_mask.sh
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# 定义参数
|
| 6 |
+
lr=1e-5
|
| 7 |
+
base=Qwen2.5-7B-Instruct
|
| 8 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 9 |
+
# train_data=hopotqa_1217.json
|
| 10 |
+
train_data=no_error_data_871
|
| 11 |
+
bsz=2
|
| 12 |
+
acc=4
|
| 13 |
+
|
| 14 |
+
# 生成随机 JOB-ID
|
| 15 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 16 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_871_wo_mask"
|
| 17 |
+
|
| 18 |
+
# 输出路径
|
| 19 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 20 |
+
|
| 21 |
+
output_dir_1=${output_dir}
|
| 22 |
+
model_name_1=${base}
|
| 23 |
+
dataset_1=${train_data}
|
| 24 |
+
# 创建输出目录
|
| 25 |
+
mkdir -p "$output_dir"
|
| 26 |
+
|
| 27 |
+
echo ${output_dir}
|
| 28 |
+
|
| 29 |
+
# 执行 deepspeed 命令
|
| 30 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 31 |
+
--master_port=9944 \
|
| 32 |
+
sft_2_wo_mask.py \
|
| 33 |
+
--deepspeed ds_zero3_offload.json \
|
| 34 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 35 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 36 |
+
--do_train \
|
| 37 |
+
--save_safetensors true \
|
| 38 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 39 |
+
--lr_scheduler_type cosine \
|
| 40 |
+
--output_dir "$output_dir" \
|
| 41 |
+
--overwrite_output_dir \
|
| 42 |
+
--warmup_ratio 0.03 \
|
| 43 |
+
--gradient_checkpointing true \
|
| 44 |
+
--per_device_train_batch_size "$bsz" \
|
| 45 |
+
--gradient_accumulation_steps "$acc" \
|
| 46 |
+
--logging_steps 1 \
|
| 47 |
+
--learning_rate "$lr" \
|
| 48 |
+
--num_train_epochs 6 \
|
| 49 |
+
--save_strategy epoch \
|
| 50 |
+
--save_only_model true \
|
| 51 |
+
--model_max_length 30000 \
|
| 52 |
+
--save_total_limit 6 \
|
| 53 |
+
--bf16 || exit 1
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# bash test_two_model_qwq.sh $output_dir_1 $model_name_1 $dataset_1
|
| 57 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 58 |
+
# bash test.sh $output_dir_3 $model_name_3
|
| 59 |
+
|
| 60 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 61 |
+
|
| 62 |
+
|
deep_search/sft/run.sh
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
export HOME=/opt/aps/workdir/home
|
| 2 |
+
export http_proxy=http://127.0.0.1:7880
|
| 3 |
+
export https_proxy=http://127.0.0.1:7880
|
| 4 |
+
|
| 5 |
+
./miniforge3/bin/conda init bash
|
| 6 |
+
source ~/.bashrc
|
| 7 |
+
eval "$(conda shell.bash hook)"
|
| 8 |
+
conda activate llm
|
| 9 |
+
bash mix.sh
|
| 10 |
+
# set -x
|
| 11 |
+
|
| 12 |
+
# bash Miniforge3.sh -b -u -p ./miniforge3
|
| 13 |
+
# ./miniforge3/bin/conda init bash
|
| 14 |
+
# source ~/.bashrc
|
| 15 |
+
|
| 16 |
+
# # export https_proxy='http://agent.baidu.com:8891'
|
| 17 |
+
# # export http_proxy='http://agent.baidu.com:8891'
|
| 18 |
+
|
| 19 |
+
# eval "$(conda shell.bash hook)"
|
| 20 |
+
# conda create -n llm python=3.11 -y
|
| 21 |
+
# conda activate llm
|
| 22 |
+
|
| 23 |
+
# pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
|
| 24 |
+
|
| 25 |
+
cd /opt/aps/workdir/sunshuang/search_o1/ && http_proxy=http://127.0.0.1:7880 https_proxy=http://127.0.0.1:7880 CUDA_VISIBLE_DEVICES=2,3 /opt/aps/workdir/miniforge3/envs/search_o1/bin/python scripts/reason_two_model_2.py --dataset_name hotpotqa --cache_dir_base /opt/aps/workdir/sunshuang/search_o1/cache_reason_two_model/eval_reason_two_model/qwen-instruct-32B/JOB9986:LR1e-5:BASEQwen2.5-32B-Instruct:TOKENDeepSeek-R1-Distill-Qwen-32:BSZ1:ACC8/38/hotpotqa --output_dir_base /opt/aps/workdir/sunshuang/search_o1/outputs_reason_two_model/eval_reason_two_model/qwen-instruct-32B/JOB9986:LR1e-5:BASEQwen2.5-32B-Instruct:TOKENDeepSeek-R1-Distill-Qwen-32:BSZ1:ACC8/38/hotpotqa --split test --max_search_limit 5 --max_turn 10 --top_k 5 --max_doc_len 3000 --subset_num 100 --model_path /opt/aps/workdir/output/JOB9986:LR1e-5:BASEQwen2.5-32B-Instruct:TOKENDeepSeek-R1-Distill-Qwen-32:BSZ1:ACC8/checkpoint-38 --model_doc_reason_path "/capacity/userdata/models/Qwen2.5-32B-Instruct" --jina_api_key "jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ" --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" --openai_api_base "http://localhost:8001/v1"
|
deep_search/sft/sft.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import copy
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import Optional, Dict, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch.utils.data import random_split
|
| 11 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 12 |
+
import transformers
|
| 13 |
+
from torch.utils.data import Dataset
|
| 14 |
+
from transformers import Trainer
|
| 15 |
+
import random
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
from transformers import AutoModelForCausalLM, TrainingArguments
|
| 18 |
+
from datasets import load_dataset
|
| 19 |
+
from transformers import DataCollatorForSeq2Seq
|
| 20 |
+
import shutil
|
| 21 |
+
|
| 22 |
+
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import matplotlib.pyplot as plt
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
|
| 30 |
+
class ModelArguments:
|
| 31 |
+
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
|
| 32 |
+
# flash_attention: Optional[bool] = field(default=False)
|
| 33 |
+
tokenizer_name_or_path: Optional[str] = field(default=None)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
|
| 37 |
+
class DataArguments:
|
| 38 |
+
data_path: str = field(
|
| 39 |
+
default=None, metadata={"help": "Path to the training data."}
|
| 40 |
+
)
|
| 41 |
+
prompt_type: Optional[str] = field(default="instruction")
|
| 42 |
+
dailog_augmentation: Optional[bool] = field(default=False)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@dataclass
|
| 46 |
+
class TrainingArguments(transformers.TrainingArguments):
|
| 47 |
+
cache_dir: Optional[str] = field(default=None)
|
| 48 |
+
optim: str = field(default="adamw_torch")
|
| 49 |
+
model_max_length: int = field(
|
| 50 |
+
default=512,
|
| 51 |
+
metadata={
|
| 52 |
+
"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
|
| 53 |
+
},
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
IGNORE_INDEX = -100
|
| 58 |
+
MAX_LENGTH = 2000
|
| 59 |
+
|
| 60 |
+
def process(sample, tokenizer):
|
| 61 |
+
# build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
|
| 62 |
+
# for multiturn examples, we only mask the prompt part in each prompt-response pair.
|
| 63 |
+
source = sample["input"]
|
| 64 |
+
# print(source)
|
| 65 |
+
# print(tokenizer.bos_token)
|
| 66 |
+
# print(source == None)
|
| 67 |
+
# if tokenizer.bos_token not in source:
|
| 68 |
+
# source = tokenizer.apply_chat_template(
|
| 69 |
+
# [
|
| 70 |
+
# {'role': 'user', 'content': source}
|
| 71 |
+
# ],
|
| 72 |
+
# tokenize=False, add_generation_prompt=True
|
| 73 |
+
# )
|
| 74 |
+
# # print(source)
|
| 75 |
+
source = tokenizer.apply_chat_template(
|
| 76 |
+
[
|
| 77 |
+
{'role': 'user', 'content': source}
|
| 78 |
+
],
|
| 79 |
+
tokenize=False, add_generation_prompt=True
|
| 80 |
+
)
|
| 81 |
+
# print(source)
|
| 82 |
+
source = tokenizer(source, add_special_tokens=False)["input_ids"]
|
| 83 |
+
target = [IGNORE_INDEX] * len(source)
|
| 84 |
+
for output in sample["output"]:
|
| 85 |
+
for k, v in output.items():
|
| 86 |
+
if v is None:
|
| 87 |
+
continue
|
| 88 |
+
v_tokens = tokenizer(v, add_special_tokens=False)["input_ids"]
|
| 89 |
+
if k in ["gen"]:
|
| 90 |
+
source += v_tokens
|
| 91 |
+
target += v_tokens
|
| 92 |
+
elif k in ["doc_gen"]:
|
| 93 |
+
source += v_tokens
|
| 94 |
+
target += [IGNORE_INDEX] * len(v_tokens)
|
| 95 |
+
input_ids = source
|
| 96 |
+
labels = target
|
| 97 |
+
|
| 98 |
+
input_ids.append(tokenizer.eos_token_id)
|
| 99 |
+
labels.append(tokenizer.eos_token_id)
|
| 100 |
+
# if tokenizer.eos_token_id not in source:
|
| 101 |
+
# input_ids.append(tokenizer.eos_token_id)
|
| 102 |
+
# if tokenizer.eos_token_id not in labels:
|
| 103 |
+
# labels.append(tokenizer.eos_token_id)
|
| 104 |
+
|
| 105 |
+
# if len(input_ids) > MAX_LENGTH: # 删除过长的数据,防止爆显存
|
| 106 |
+
# return None
|
| 107 |
+
result = {
|
| 108 |
+
"input_ids": input_ids,
|
| 109 |
+
"attention_mask": [1] * len(input_ids),
|
| 110 |
+
"labels": labels,
|
| 111 |
+
}
|
| 112 |
+
# print(result)
|
| 113 |
+
return result
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def print_function(example, tokenizer):
|
| 117 |
+
print("input_ids:\n{}".format(example["input_ids"]))
|
| 118 |
+
print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
|
| 119 |
+
print("label_ids:\n{}".format(example["labels"]))
|
| 120 |
+
print("labels:\n{}".format(
|
| 121 |
+
tokenizer.decode(list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), skip_special_tokens=False)
|
| 122 |
+
))
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def get_dataset(file_path, tokenizer):
|
| 126 |
+
dataset = load_dataset('json', data_files=file_path)
|
| 127 |
+
train_dataset = dataset["train"]
|
| 128 |
+
file_name = os.path.basename(file_path)
|
| 129 |
+
dataset_name = os.path.splitext(file_name)[0]
|
| 130 |
+
# print(f"dataset_name: {dataset_name}")
|
| 131 |
+
# if os.path.exists(f"input/real_cache/{dataset_name}/"):
|
| 132 |
+
# shutil.rmtree(f"input/real_cache/{dataset_name}/")
|
| 133 |
+
# os.makedirs(f"input/real_cache/{dataset_name}/", exist_ok=True)
|
| 134 |
+
# tokenized_dataset = train_dataset.map(process, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, cache_file_name=f"input/real_cache/{dataset_name}/cache1.arrow")
|
| 135 |
+
tokenized_dataset = train_dataset.map(process, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, load_from_cache_file=False)
|
| 136 |
+
print_function(next(iter(tokenized_dataset)), tokenizer)
|
| 137 |
+
print(f"len of dataset before filter: {len(tokenized_dataset)}")
|
| 138 |
+
|
| 139 |
+
filtered_dataset = []
|
| 140 |
+
for item in tokenized_dataset:
|
| 141 |
+
if len(item["input_ids"]) <= 8000:
|
| 142 |
+
filtered_dataset.append(item)
|
| 143 |
+
print(f"len of dataset after filter: {len(filtered_dataset)}")
|
| 144 |
+
return filtered_dataset
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def train():
|
| 148 |
+
parser = transformers.HfArgumentParser(
|
| 149 |
+
(ModelArguments, DataArguments, TrainingArguments)
|
| 150 |
+
)
|
| 151 |
+
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
| 152 |
+
|
| 153 |
+
print("==========Model Args=========")
|
| 154 |
+
print(model_args)
|
| 155 |
+
print("==========Data Args=========")
|
| 156 |
+
print(data_args)
|
| 157 |
+
print("==========Training Args=========")
|
| 158 |
+
print(training_args)
|
| 159 |
+
|
| 160 |
+
if training_args.gradient_checkpointing:
|
| 161 |
+
use_cache = False # use_cache与gradient_checkpointing不能同时设置为true
|
| 162 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 163 |
+
model_args.model_name_or_path,
|
| 164 |
+
_attn_implementation="flash_attention_2",
|
| 165 |
+
use_cache=use_cache,
|
| 166 |
+
# save_only_model=True
|
| 167 |
+
).float()
|
| 168 |
+
# model = AutoLigerKernelForCausalLM.from_pretrained(
|
| 169 |
+
# model_args.model_name_or_path,
|
| 170 |
+
# _attn_implementation="flash_attention_2",
|
| 171 |
+
# use_cache=use_cache,
|
| 172 |
+
# # save_only_model=True
|
| 173 |
+
# ).float()
|
| 174 |
+
if model_args.tokenizer_name_or_path is None:
|
| 175 |
+
model_args.tokenizer_name_or_path = model_args.model_name_or_path
|
| 176 |
+
tokenizer = transformers.AutoTokenizer.from_pretrained(
|
| 177 |
+
model_args.tokenizer_name_or_path, model_max_length=training_args.model_max_length
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
if tokenizer.pad_token is None:
|
| 181 |
+
tokenizer.pad_token = tokenizer.eos_token
|
| 182 |
+
|
| 183 |
+
dataset = get_dataset(data_args.data_path, tokenizer)
|
| 184 |
+
|
| 185 |
+
data_collator = DataCollatorForSeq2Seq(
|
| 186 |
+
tokenizer=tokenizer,
|
| 187 |
+
label_pad_token_id=-100,
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
trainer = Trainer(
|
| 191 |
+
model=model,
|
| 192 |
+
args=training_args,
|
| 193 |
+
tokenizer=tokenizer,
|
| 194 |
+
data_collator=data_collator,
|
| 195 |
+
train_dataset=dataset,
|
| 196 |
+
)
|
| 197 |
+
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
|
| 198 |
+
trainer.save_model(training_args.output_dir)
|
| 199 |
+
trainer.save_state()
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
if __name__ == "__main__":
|
| 203 |
+
torch.manual_seed(42)
|
| 204 |
+
train()
|
deep_search/sft/sft_2_math_after_search.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import copy
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import Optional, Dict, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch.utils.data import random_split
|
| 11 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 12 |
+
import transformers
|
| 13 |
+
from torch.utils.data import Dataset
|
| 14 |
+
from transformers import Trainer
|
| 15 |
+
import random
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
from transformers import AutoModelForCausalLM, TrainingArguments
|
| 18 |
+
from datasets import load_dataset
|
| 19 |
+
from transformers import DataCollatorForSeq2Seq
|
| 20 |
+
import shutil
|
| 21 |
+
|
| 22 |
+
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import matplotlib.pyplot as plt
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
|
| 30 |
+
class ModelArguments:
|
| 31 |
+
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
|
| 32 |
+
# flash_attention: Optional[bool] = field(default=False)
|
| 33 |
+
tokenizer_name_or_path: Optional[str] = field(default=None)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
|
| 37 |
+
class DataArguments:
|
| 38 |
+
data_path: str = field(
|
| 39 |
+
default=None, metadata={"help": "Path to the training data."}
|
| 40 |
+
)
|
| 41 |
+
prompt_type: Optional[str] = field(default="instruction")
|
| 42 |
+
dailog_augmentation: Optional[bool] = field(default=False)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@dataclass
|
| 46 |
+
class TrainingArguments(transformers.TrainingArguments):
|
| 47 |
+
cache_dir: Optional[str] = field(default=None)
|
| 48 |
+
optim: str = field(default="adamw_torch")
|
| 49 |
+
model_max_length: int = field(
|
| 50 |
+
default=512,
|
| 51 |
+
metadata={
|
| 52 |
+
"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
|
| 53 |
+
},
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
IGNORE_INDEX = -100
|
| 58 |
+
MAX_LENGTH = 2000
|
| 59 |
+
|
| 60 |
+
def process(sample, tokenizer):
|
| 61 |
+
# build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
|
| 62 |
+
# for multiturn examples, we only mask the prompt part in each prompt-response pair.
|
| 63 |
+
source = sample["input"]
|
| 64 |
+
# print(source)
|
| 65 |
+
# print(tokenizer.bos_token)
|
| 66 |
+
# print(source == None)
|
| 67 |
+
# if tokenizer.bos_token not in source:
|
| 68 |
+
# source = tokenizer.apply_chat_template(
|
| 69 |
+
# [
|
| 70 |
+
# {'role': 'user', 'content': source}
|
| 71 |
+
# ],
|
| 72 |
+
# tokenize=False, add_generation_prompt=True
|
| 73 |
+
# )
|
| 74 |
+
# # print(source)
|
| 75 |
+
source = tokenizer.apply_chat_template(
|
| 76 |
+
[
|
| 77 |
+
{'role': 'user', 'content': source}
|
| 78 |
+
],
|
| 79 |
+
tokenize=False, add_generation_prompt=True
|
| 80 |
+
)
|
| 81 |
+
# print(source)
|
| 82 |
+
source = tokenizer(source, add_special_tokens=False)["input_ids"]
|
| 83 |
+
target = [IGNORE_INDEX] * len(source)
|
| 84 |
+
for output in sample["output"]:
|
| 85 |
+
for k, v in output.items():
|
| 86 |
+
if v is None:
|
| 87 |
+
continue
|
| 88 |
+
v_tokens = tokenizer(v, add_special_tokens=False)["input_ids"]
|
| 89 |
+
if k in ["gen"]:
|
| 90 |
+
source += v_tokens
|
| 91 |
+
target += v_tokens
|
| 92 |
+
elif k in ["doc_gen"]:
|
| 93 |
+
source += v_tokens
|
| 94 |
+
target += [IGNORE_INDEX] * len(v_tokens)
|
| 95 |
+
input_ids = source
|
| 96 |
+
labels = target
|
| 97 |
+
|
| 98 |
+
input_ids.append(tokenizer.eos_token_id)
|
| 99 |
+
labels.append(tokenizer.eos_token_id)
|
| 100 |
+
# if tokenizer.eos_token_id not in source:
|
| 101 |
+
# input_ids.append(tokenizer.eos_token_id)
|
| 102 |
+
# if tokenizer.eos_token_id not in labels:
|
| 103 |
+
# labels.append(tokenizer.eos_token_id)
|
| 104 |
+
|
| 105 |
+
# if len(input_ids) > MAX_LENGTH: # 删除过长的数据,防止爆显存
|
| 106 |
+
# return None
|
| 107 |
+
result = {
|
| 108 |
+
"input_ids": input_ids,
|
| 109 |
+
"attention_mask": [1] * len(input_ids),
|
| 110 |
+
"labels": labels,
|
| 111 |
+
}
|
| 112 |
+
# print(result)
|
| 113 |
+
return result
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def process_math(sample, tokenizer):
|
| 117 |
+
# build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
|
| 118 |
+
# for multiturn examples, we only mask the prompt part in each prompt-response pair.
|
| 119 |
+
source = sample["prompt"]
|
| 120 |
+
source = tokenizer.apply_chat_template(
|
| 121 |
+
[
|
| 122 |
+
{'role': 'user', 'content': source}
|
| 123 |
+
],
|
| 124 |
+
tokenize=False, add_generation_prompt=True
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
source = tokenizer(source, add_special_tokens=False)["input_ids"]
|
| 128 |
+
target = [IGNORE_INDEX] * len(source)
|
| 129 |
+
|
| 130 |
+
output = sample["output"]
|
| 131 |
+
output = tokenizer(output, add_special_tokens=False)["input_ids"]
|
| 132 |
+
|
| 133 |
+
source += output
|
| 134 |
+
target += output
|
| 135 |
+
|
| 136 |
+
input_ids = source
|
| 137 |
+
labels = target
|
| 138 |
+
|
| 139 |
+
input_ids.append(tokenizer.eos_token_id)
|
| 140 |
+
labels.append(tokenizer.eos_token_id)
|
| 141 |
+
# if tokenizer.eos_token_id not in source:
|
| 142 |
+
# input_ids.append(tokenizer.eos_token_id)
|
| 143 |
+
# if tokenizer.eos_token_id not in labels:
|
| 144 |
+
# labels.append(tokenizer.eos_token_id)
|
| 145 |
+
|
| 146 |
+
# if len(input_ids) > MAX_LENGTH: # 删除过长的数据,防止爆显存
|
| 147 |
+
# return None
|
| 148 |
+
result = {
|
| 149 |
+
"input_ids": input_ids,
|
| 150 |
+
"attention_mask": [1] * len(input_ids),
|
| 151 |
+
"labels": labels,
|
| 152 |
+
}
|
| 153 |
+
# print(result)
|
| 154 |
+
return result
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def print_function(example, tokenizer):
|
| 159 |
+
print("input_ids:\n{}".format(example["input_ids"]))
|
| 160 |
+
print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
|
| 161 |
+
print("label_ids:\n{}".format(example["labels"]))
|
| 162 |
+
print("labels:\n{}".format(
|
| 163 |
+
tokenizer.decode(list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), skip_special_tokens=False)
|
| 164 |
+
))
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def get_dataset(file_path, tokenizer, math_dataset=False):
|
| 168 |
+
dataset = load_dataset('json', data_files=file_path)
|
| 169 |
+
train_dataset = dataset["train"]
|
| 170 |
+
file_name = os.path.basename(file_path)
|
| 171 |
+
dataset_name = os.path.splitext(file_name)[0]
|
| 172 |
+
# print(f"dataset_name: {dataset_name}")
|
| 173 |
+
# if os.path.exists(f"input/real_cache/{dataset_name}/"):
|
| 174 |
+
# shutil.rmtree(f"input/real_cache/{dataset_name}/")
|
| 175 |
+
# os.makedirs(f"input/real_cache/{dataset_name}/", exist_ok=True)
|
| 176 |
+
# tokenized_dataset = train_dataset.map(process, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, cache_file_name=f"input/real_cache/{dataset_name}/cache1.arrow")
|
| 177 |
+
|
| 178 |
+
if math_dataset:
|
| 179 |
+
tokenized_dataset = train_dataset.map(process_math, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, load_from_cache_file=False)
|
| 180 |
+
else:
|
| 181 |
+
tokenized_dataset = train_dataset.map(process, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, load_from_cache_file=False)
|
| 182 |
+
print_function(next(iter(tokenized_dataset)), tokenizer)
|
| 183 |
+
print(f"len of dataset before filter: {len(tokenized_dataset)}")
|
| 184 |
+
|
| 185 |
+
print(f"type of tokenized_dataset: {type(tokenized_dataset)}")
|
| 186 |
+
|
| 187 |
+
filtered_dataset = []
|
| 188 |
+
for item in tokenized_dataset:
|
| 189 |
+
# if len(item["input_ids"]) <= 10000:
|
| 190 |
+
# print(item)
|
| 191 |
+
print(f"type of item: {type(item)}")
|
| 192 |
+
filtered_dataset.append(item)
|
| 193 |
+
print(f"len of dataset after filter: {len(filtered_dataset)}")
|
| 194 |
+
return filtered_dataset
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def train():
|
| 198 |
+
parser = transformers.HfArgumentParser(
|
| 199 |
+
(ModelArguments, DataArguments, TrainingArguments)
|
| 200 |
+
)
|
| 201 |
+
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
| 202 |
+
|
| 203 |
+
print("==========Model Args=========")
|
| 204 |
+
print(model_args)
|
| 205 |
+
print("==========Data Args=========")
|
| 206 |
+
print(data_args)
|
| 207 |
+
print("==========Training Args=========")
|
| 208 |
+
print(training_args)
|
| 209 |
+
|
| 210 |
+
if training_args.gradient_checkpointing:
|
| 211 |
+
use_cache = False # use_cache与gradient_checkpointing不能同时设置为true
|
| 212 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 213 |
+
model_args.model_name_or_path,
|
| 214 |
+
_attn_implementation="flash_attention_2",
|
| 215 |
+
use_cache=use_cache,
|
| 216 |
+
# save_only_model=True
|
| 217 |
+
).float()
|
| 218 |
+
# model = AutoLigerKernelForCausalLM.from_pretrained(
|
| 219 |
+
# model_args.model_name_or_path,
|
| 220 |
+
# _attn_implementation="flash_attention_2",
|
| 221 |
+
# use_cache=use_cache,
|
| 222 |
+
# # save_only_model=True
|
| 223 |
+
# ).float()
|
| 224 |
+
if model_args.tokenizer_name_or_path is None:
|
| 225 |
+
model_args.tokenizer_name_or_path = model_args.model_name_or_path
|
| 226 |
+
tokenizer = transformers.AutoTokenizer.from_pretrained(
|
| 227 |
+
model_args.tokenizer_name_or_path, model_max_length=training_args.model_max_length
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
if tokenizer.pad_token is None:
|
| 231 |
+
tokenizer.pad_token = tokenizer.eos_token
|
| 232 |
+
|
| 233 |
+
# dataset_qa = get_dataset(data_args.data_path, tokenizer)[:5]
|
| 234 |
+
# print(f"qa dataset: {len(dataset_qa)}")
|
| 235 |
+
dataset_math = get_dataset("/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/math_qwq_4524_selected_add_prompt_871.json", tokenizer, True)
|
| 236 |
+
print(f"math dataset: {len(dataset_math)}")
|
| 237 |
+
dataset = dataset_math
|
| 238 |
+
# dataset = get_dataset(data_args.data_path, tokenizer)
|
| 239 |
+
print(f"fianl dataset: {len(dataset)}")
|
| 240 |
+
|
| 241 |
+
data_collator = DataCollatorForSeq2Seq(
|
| 242 |
+
tokenizer=tokenizer,
|
| 243 |
+
label_pad_token_id=-100,
|
| 244 |
+
)
|
| 245 |
+
|
| 246 |
+
trainer = Trainer(
|
| 247 |
+
model=model,
|
| 248 |
+
args=training_args,
|
| 249 |
+
tokenizer=tokenizer,
|
| 250 |
+
data_collator=data_collator,
|
| 251 |
+
train_dataset=dataset,
|
| 252 |
+
)
|
| 253 |
+
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
|
| 254 |
+
trainer.save_model(training_args.output_dir)
|
| 255 |
+
trainer.save_state()
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
if __name__ == "__main__":
|
| 259 |
+
torch.manual_seed(42)
|
| 260 |
+
train()
|
deep_search/sft/sft_2_wo_mask.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import copy
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import Optional, Dict, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch.utils.data import random_split
|
| 11 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 12 |
+
import transformers
|
| 13 |
+
from torch.utils.data import Dataset
|
| 14 |
+
from transformers import Trainer
|
| 15 |
+
import random
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
from transformers import AutoModelForCausalLM, TrainingArguments
|
| 18 |
+
from datasets import load_dataset
|
| 19 |
+
from transformers import DataCollatorForSeq2Seq
|
| 20 |
+
import shutil
|
| 21 |
+
|
| 22 |
+
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import matplotlib.pyplot as plt
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
|
| 30 |
+
class ModelArguments:
|
| 31 |
+
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
|
| 32 |
+
# flash_attention: Optional[bool] = field(default=False)
|
| 33 |
+
tokenizer_name_or_path: Optional[str] = field(default=None)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
|
| 37 |
+
class DataArguments:
|
| 38 |
+
data_path: str = field(
|
| 39 |
+
default=None, metadata={"help": "Path to the training data."}
|
| 40 |
+
)
|
| 41 |
+
prompt_type: Optional[str] = field(default="instruction")
|
| 42 |
+
dailog_augmentation: Optional[bool] = field(default=False)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@dataclass
|
| 46 |
+
class TrainingArguments(transformers.TrainingArguments):
|
| 47 |
+
cache_dir: Optional[str] = field(default=None)
|
| 48 |
+
optim: str = field(default="adamw_torch")
|
| 49 |
+
model_max_length: int = field(
|
| 50 |
+
default=512,
|
| 51 |
+
metadata={
|
| 52 |
+
"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
|
| 53 |
+
},
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
IGNORE_INDEX = -100
|
| 58 |
+
MAX_LENGTH = 2000
|
| 59 |
+
|
| 60 |
+
def process(sample, tokenizer):
|
| 61 |
+
# build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
|
| 62 |
+
# for multiturn examples, we only mask the prompt part in each prompt-response pair.
|
| 63 |
+
source = sample["input"]
|
| 64 |
+
# print(source)
|
| 65 |
+
# print(tokenizer.bos_token)
|
| 66 |
+
# print(source == None)
|
| 67 |
+
# if tokenizer.bos_token not in source:
|
| 68 |
+
# source = tokenizer.apply_chat_template(
|
| 69 |
+
# [
|
| 70 |
+
# {'role': 'user', 'content': source}
|
| 71 |
+
# ],
|
| 72 |
+
# tokenize=False, add_generation_prompt=True
|
| 73 |
+
# )
|
| 74 |
+
# # print(source)
|
| 75 |
+
source = tokenizer.apply_chat_template(
|
| 76 |
+
[
|
| 77 |
+
{'role': 'user', 'content': source}
|
| 78 |
+
],
|
| 79 |
+
tokenize=False, add_generation_prompt=True
|
| 80 |
+
)
|
| 81 |
+
# print(source)
|
| 82 |
+
source = tokenizer(source, add_special_tokens=False)["input_ids"]
|
| 83 |
+
target = [IGNORE_INDEX] * len(source)
|
| 84 |
+
for output in sample["output"]:
|
| 85 |
+
for k, v in output.items():
|
| 86 |
+
if v is None:
|
| 87 |
+
continue
|
| 88 |
+
v_tokens = tokenizer(v, add_special_tokens=False)["input_ids"]
|
| 89 |
+
if k in ["gen"]:
|
| 90 |
+
source += v_tokens
|
| 91 |
+
target += v_tokens
|
| 92 |
+
elif k in ["doc_gen"]:
|
| 93 |
+
source += v_tokens
|
| 94 |
+
target += [IGNORE_INDEX] * len(v_tokens)
|
| 95 |
+
input_ids = source
|
| 96 |
+
labels = target
|
| 97 |
+
|
| 98 |
+
input_ids.append(tokenizer.eos_token_id)
|
| 99 |
+
labels.append(tokenizer.eos_token_id)
|
| 100 |
+
# if tokenizer.eos_token_id not in source:
|
| 101 |
+
# input_ids.append(tokenizer.eos_token_id)
|
| 102 |
+
# if tokenizer.eos_token_id not in labels:
|
| 103 |
+
# labels.append(tokenizer.eos_token_id)
|
| 104 |
+
|
| 105 |
+
# if len(input_ids) > MAX_LENGTH: # 删除过长的数据,防止爆显存
|
| 106 |
+
# return None
|
| 107 |
+
result = {
|
| 108 |
+
"input_ids": input_ids,
|
| 109 |
+
"attention_mask": [1] * len(input_ids),
|
| 110 |
+
"labels": labels,
|
| 111 |
+
}
|
| 112 |
+
# print(result)
|
| 113 |
+
return result
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def process_math(sample, tokenizer):
|
| 117 |
+
# build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
|
| 118 |
+
# for multiturn examples, we only mask the prompt part in each prompt-response pair.
|
| 119 |
+
source = sample["input"]
|
| 120 |
+
source = tokenizer.apply_chat_template(
|
| 121 |
+
[
|
| 122 |
+
{'role': 'user', 'content': source}
|
| 123 |
+
],
|
| 124 |
+
tokenize=False, add_generation_prompt=True
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
source = tokenizer(source, add_special_tokens=False)["input_ids"]
|
| 128 |
+
target = [IGNORE_INDEX] * len(source)
|
| 129 |
+
|
| 130 |
+
output = sample["output_text"]
|
| 131 |
+
output = tokenizer(output, add_special_tokens=False)["input_ids"]
|
| 132 |
+
|
| 133 |
+
source += output
|
| 134 |
+
target += output
|
| 135 |
+
|
| 136 |
+
input_ids = source
|
| 137 |
+
labels = target
|
| 138 |
+
|
| 139 |
+
input_ids.append(tokenizer.eos_token_id)
|
| 140 |
+
labels.append(tokenizer.eos_token_id)
|
| 141 |
+
# if tokenizer.eos_token_id not in source:
|
| 142 |
+
# input_ids.append(tokenizer.eos_token_id)
|
| 143 |
+
# if tokenizer.eos_token_id not in labels:
|
| 144 |
+
# labels.append(tokenizer.eos_token_id)
|
| 145 |
+
|
| 146 |
+
# if len(input_ids) > MAX_LENGTH: # 删除过长的数据,防止爆显存
|
| 147 |
+
# return None
|
| 148 |
+
result = {
|
| 149 |
+
"input_ids": input_ids,
|
| 150 |
+
"attention_mask": [1] * len(input_ids),
|
| 151 |
+
"labels": labels,
|
| 152 |
+
}
|
| 153 |
+
# print(result)
|
| 154 |
+
return result
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def print_function(example, tokenizer):
|
| 159 |
+
print("input_ids:\n{}".format(example["input_ids"]))
|
| 160 |
+
print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
|
| 161 |
+
print("label_ids:\n{}".format(example["labels"]))
|
| 162 |
+
print("labels:\n{}".format(
|
| 163 |
+
tokenizer.decode(list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), skip_special_tokens=False)
|
| 164 |
+
))
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def get_dataset(file_path, tokenizer, math_dataset=False):
|
| 168 |
+
dataset = load_dataset('json', data_files=file_path)
|
| 169 |
+
train_dataset = dataset["train"]
|
| 170 |
+
file_name = os.path.basename(file_path)
|
| 171 |
+
dataset_name = os.path.splitext(file_name)[0]
|
| 172 |
+
# print(f"dataset_name: {dataset_name}")
|
| 173 |
+
# if os.path.exists(f"input/real_cache/{dataset_name}/"):
|
| 174 |
+
# shutil.rmtree(f"input/real_cache/{dataset_name}/")
|
| 175 |
+
# os.makedirs(f"input/real_cache/{dataset_name}/", exist_ok=True)
|
| 176 |
+
# tokenized_dataset = train_dataset.map(process, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, cache_file_name=f"input/real_cache/{dataset_name}/cache1.arrow")
|
| 177 |
+
|
| 178 |
+
if math_dataset:
|
| 179 |
+
print("math dataste")
|
| 180 |
+
tokenized_dataset = train_dataset.map(process_math, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, load_from_cache_file=False)
|
| 181 |
+
else:
|
| 182 |
+
tokenized_dataset = train_dataset.map(process, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, load_from_cache_file=False)
|
| 183 |
+
print_function(next(iter(tokenized_dataset)), tokenizer)
|
| 184 |
+
print(f"len of dataset before filter: {len(tokenized_dataset)}")
|
| 185 |
+
|
| 186 |
+
print(f"type of tokenized_dataset: {type(tokenized_dataset)}")
|
| 187 |
+
|
| 188 |
+
filtered_dataset = []
|
| 189 |
+
for item in tokenized_dataset:
|
| 190 |
+
# if len(item["input_ids"]) <= 10000:
|
| 191 |
+
# print(item)
|
| 192 |
+
print(f"type of item: {type(item)}")
|
| 193 |
+
filtered_dataset.append(item)
|
| 194 |
+
print(f"len of dataset after filter: {len(filtered_dataset)}")
|
| 195 |
+
return filtered_dataset
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def train():
|
| 199 |
+
parser = transformers.HfArgumentParser(
|
| 200 |
+
(ModelArguments, DataArguments, TrainingArguments)
|
| 201 |
+
)
|
| 202 |
+
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
| 203 |
+
|
| 204 |
+
print("==========Model Args=========")
|
| 205 |
+
print(model_args)
|
| 206 |
+
print("==========Data Args=========")
|
| 207 |
+
print(data_args)
|
| 208 |
+
print("==========Training Args=========")
|
| 209 |
+
print(training_args)
|
| 210 |
+
|
| 211 |
+
if training_args.gradient_checkpointing:
|
| 212 |
+
use_cache = False # use_cache与gradient_checkpointing不能同时设置为true
|
| 213 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 214 |
+
model_args.model_name_or_path,
|
| 215 |
+
_attn_implementation="flash_attention_2",
|
| 216 |
+
use_cache=use_cache,
|
| 217 |
+
# save_only_model=True
|
| 218 |
+
).float()
|
| 219 |
+
# model = AutoLigerKernelForCausalLM.from_pretrained(
|
| 220 |
+
# model_args.model_name_or_path,
|
| 221 |
+
# _attn_implementation="flash_attention_2",
|
| 222 |
+
# use_cache=use_cache,
|
| 223 |
+
# # save_only_model=True
|
| 224 |
+
# ).float()
|
| 225 |
+
if model_args.tokenizer_name_or_path is None:
|
| 226 |
+
model_args.tokenizer_name_or_path = model_args.model_name_or_path
|
| 227 |
+
tokenizer = transformers.AutoTokenizer.from_pretrained(
|
| 228 |
+
model_args.tokenizer_name_or_path, model_max_length=training_args.model_max_length
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
if tokenizer.pad_token is None:
|
| 232 |
+
tokenizer.pad_token = tokenizer.eos_token
|
| 233 |
+
|
| 234 |
+
dataset_qa = get_dataset(data_args.data_path, tokenizer, True)
|
| 235 |
+
print(f"qa dataset: {len(dataset_qa)}")
|
| 236 |
+
# dataset_math = get_dataset("/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/math_qwq_4524_selected_add_prompt_871.json", tokenizer, True)
|
| 237 |
+
# print(f"math dataset: {len(dataset_math)}")
|
| 238 |
+
dataset = dataset_qa
|
| 239 |
+
# dataset = get_dataset(data_args.data_path, tokenizer)
|
| 240 |
+
print(f"fianl dataset: {len(dataset)}")
|
| 241 |
+
|
| 242 |
+
data_collator = DataCollatorForSeq2Seq(
|
| 243 |
+
tokenizer=tokenizer,
|
| 244 |
+
label_pad_token_id=-100,
|
| 245 |
+
)
|
| 246 |
+
|
| 247 |
+
trainer = Trainer(
|
| 248 |
+
model=model,
|
| 249 |
+
args=training_args,
|
| 250 |
+
tokenizer=tokenizer,
|
| 251 |
+
data_collator=data_collator,
|
| 252 |
+
train_dataset=dataset,
|
| 253 |
+
)
|
| 254 |
+
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
|
| 255 |
+
trainer.save_model(training_args.output_dir)
|
| 256 |
+
trainer.save_state()
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
if __name__ == "__main__":
|
| 260 |
+
torch.manual_seed(42)
|
| 261 |
+
train()
|
deep_search/sft/test.ipynb
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": null,
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"outputs": [],
|
| 8 |
+
"source": [
|
| 9 |
+
"import os\n",
|
| 10 |
+
"from transformers import AutoModelForCausalLM, AutoTokenizer\n",
|
| 11 |
+
"# os.environ[\"CUDA_VISIBLE_DEVICES\"] = '6,7'\n",
|
| 12 |
+
"model_path = \"/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32B\"\n",
|
| 13 |
+
"\n",
|
| 14 |
+
"# model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto')\n",
|
| 15 |
+
"tokenizer = AutoTokenizer.from_pretrained(model_path)\n",
|
| 16 |
+
"\n",
|
| 17 |
+
"inputs = tokenizer.apply_chat_template(\n",
|
| 18 |
+
" [\n",
|
| 19 |
+
" {'role': 'user', 'content': \"I hate you\"}\n",
|
| 20 |
+
" ],\n",
|
| 21 |
+
" tokenize=False, add_generation_prompt=True\n",
|
| 22 |
+
")\n",
|
| 23 |
+
"print(inputs)"
|
| 24 |
+
]
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"cell_type": "code",
|
| 28 |
+
"execution_count": null,
|
| 29 |
+
"metadata": {},
|
| 30 |
+
"outputs": [],
|
| 31 |
+
"source": [
|
| 32 |
+
"import json\n",
|
| 33 |
+
"\n",
|
| 34 |
+
"\n",
|
| 35 |
+
"\n",
|
| 36 |
+
"def load_json(file_path):\n",
|
| 37 |
+
" with open(file_path, \"r\", encoding=\"utf-8\") as file:\n",
|
| 38 |
+
" data = json.load(file)\n",
|
| 39 |
+
" print(f\"Loaded {len(data)} items from {file_path}\")\n",
|
| 40 |
+
" return data\n",
|
| 41 |
+
"\n",
|
| 42 |
+
"def save_json(data, file_path):\n",
|
| 43 |
+
" with open(file_path, \"w\", encoding=\"utf-8\") as file:\n",
|
| 44 |
+
" json.dump(data, file, ensure_ascii=False, indent=4)\n",
|
| 45 |
+
" print(f\"Saved {len(data)} items to {file_path}\")\n",
|
| 46 |
+
"\n",
|
| 47 |
+
"data = load_json(\"/share/project/sunshuang/deep_search/search_o1/sft_data/new_instruction_2k_sft.json\")"
|
| 48 |
+
]
|
| 49 |
+
}
|
| 50 |
+
],
|
| 51 |
+
"metadata": {
|
| 52 |
+
"language_info": {
|
| 53 |
+
"name": "python"
|
| 54 |
+
}
|
| 55 |
+
},
|
| 56 |
+
"nbformat": 4,
|
| 57 |
+
"nbformat_minor": 2
|
| 58 |
+
}
|
deep_search/sft/test_len.sh
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# qwen 7b 用自己的tokenizer
|
| 3 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 4 |
+
# --master_port=9944 \
|
| 5 |
+
# --include localhost:6,7 \
|
| 6 |
+
# sft.py \
|
| 7 |
+
# --deepspeed ds_zero3.json \
|
| 8 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 9 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 10 |
+
# --do_train \
|
| 11 |
+
# --save_safetensors true \
|
| 12 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 13 |
+
# --lr_scheduler_type cosine \
|
| 14 |
+
# --output_dir output/sft_use_original_tokenizer/qwen_7b_original_tokenizer_inst_data_1217_1 \
|
| 15 |
+
# --overwrite_output_dir \
|
| 16 |
+
# --warmup_ratio 0.03 \
|
| 17 |
+
# --gradient_checkpointing true \
|
| 18 |
+
# --per_device_train_batch_size 1 \
|
| 19 |
+
# --gradient_accumulation_steps 4 \
|
| 20 |
+
# --logging_steps 1 \
|
| 21 |
+
# --learning_rate 2e-5 \
|
| 22 |
+
# --num_train_epochs 1 \
|
| 23 |
+
# --model_max_length 8192 \
|
| 24 |
+
# --save_total_limit 16 \
|
| 25 |
+
# --bf16 || exit 1
|
deep_search/sft/test_two_model_qwq.sh
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_save_path=$1
|
| 2 |
+
model_name=$2
|
| 3 |
+
dataset=$3
|
| 4 |
+
# cd /share/project/sunshuang/deep_search/search_o1
|
| 5 |
+
|
| 6 |
+
# CUDA_VISIBLE_DEVICES=0,1 nohup \
|
| 7 |
+
# /opt/aps/workdir/miniforge3/envs/search_o1/bin/vllm serve /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 8 |
+
# --tensor-parallel-size=2 \
|
| 9 |
+
# --gpu-memory-utilization 0.95 \
|
| 10 |
+
# --port 8001 \
|
| 11 |
+
# > vllm-Qwen32B.log 2>&1 &
|
| 12 |
+
# tail -f vllm-Qwen32B.log
|
| 13 |
+
# exit 1
|
| 14 |
+
|
| 15 |
+
# sleep 300 # 等待5min
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
cd /opt/aps/workdir/sunshuang/deep_search/sft
|
| 19 |
+
|
| 20 |
+
echo "start eval ${model_save_path}"
|
| 21 |
+
|
| 22 |
+
# /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_1.py ${model_save_path} > 3-5-2013qa_500math_doc_by_qwen.log 2>&1
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
sleep 300 # 等待5min
|
| 27 |
+
|
| 28 |
+
echo "vllm serve qwq"
|
| 29 |
+
|
| 30 |
+
export CUDA_VISIBLE_DEVICES=0,1
|
| 31 |
+
nohup vllm serve /capacity/userdata/models/QwQ-32B \
|
| 32 |
+
--tensor-parallel-size=2 \
|
| 33 |
+
--gpu-memory-utilization 0.95 \
|
| 34 |
+
--port 8003 > /opt/aps/workdir/sunshuang/deep_search/search_o1/sft_logs/vllm_serve_qwq.log 2>&1 &
|
| 35 |
+
|
| 36 |
+
echo "finish vllm serve qwq"
|
| 37 |
+
|
| 38 |
+
sleep 300
|
| 39 |
+
|
| 40 |
+
nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_qwq.py ${model_save_path} > 5-5_${dataset}_${model_name}.log 2>&1 &
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_qwq.py /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation > 5-6_1.log 2>&1 &
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871 > 4-24_no_error_data_871_doc_by_itself_Qwen2.5-7B-Instruct_add_math871_2.log 2>&1 &
|
| 47 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871 > 4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_qwen7b_sft_871_checkpoint-78_add_math871_after_search_2.log 2>&1 &
|
| 48 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft > 3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-32B-Instruct_eval.log 2>&1 &
|
| 49 |
+
# /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft
|
| 50 |
+
# /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft
|
| 51 |
+
# # nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt_check.py /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft > 3-26_selected_data_1174_sft_doc_by_itself_QwQ-32B_2.log 2>&1 &
|
| 52 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u wait_eval_use_one_model_for_ckpt.py /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469 > 3-28_merged_1174_zh_296_sft_1469_doc_by_itself_QwQ-32B.log 2>&1 &
|
| 53 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u wait_eval_use_one_model_for_ckpt.py /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829 > 3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B_3.log 2>&1 &
|
| 54 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_worker1.py /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097 > 4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B_3.log 2>&1 &
|
| 55 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt.py /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871 > 4-4_no_error_data_871_doc_by_itself_DeepSeek-R1-Distill-Qwen-32.log 2>&1 &
|
| 56 |
+
|
| 57 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533 > 3-27_merged_syn_long_359_sft_1533_doc_by_itself_QwQ-32B_1.log 2>&1 &
|
| 58 |
+
|
| 59 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans > 3-26_strict_selected_1526_sft_format_ans_doc_by_itself_QwQ-32B_1.log 2>&1 &
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462 > 3-20-merged_2462_remove_exp_doc_by_itself_qwq.log 2>&1 &
|
| 63 |
+
|
| 64 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4 > 3-12-1200qa_doc_by_itself_r1_32b.log 2>&1 &
|
deep_search/sft/test_two_model_qwq_1.sh
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_save_path=$1
|
| 2 |
+
model_name=$2
|
| 3 |
+
dataset=$3
|
| 4 |
+
# cd /share/project/sunshuang/deep_search/search_o1
|
| 5 |
+
|
| 6 |
+
# CUDA_VISIBLE_DEVICES=0,1 nohup \
|
| 7 |
+
# /opt/aps/workdir/miniforge3/envs/search_o1/bin/vllm serve /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 8 |
+
# --tensor-parallel-size=2 \
|
| 9 |
+
# --gpu-memory-utilization 0.95 \
|
| 10 |
+
# --port 8001 \
|
| 11 |
+
# > vllm-Qwen32B.log 2>&1 &
|
| 12 |
+
# tail -f vllm-Qwen32B.log
|
| 13 |
+
# exit 1
|
| 14 |
+
|
| 15 |
+
# sleep 300 # 等待5min
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
cd /opt/aps/workdir/sunshuang/deep_search/sft
|
| 19 |
+
|
| 20 |
+
echo "start eval ${model_save_path}"
|
| 21 |
+
|
| 22 |
+
# /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_1.py ${model_save_path} > 3-5-2013qa_500math_doc_by_qwen.log 2>&1
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
sleep 300 # 等待5min
|
| 27 |
+
|
| 28 |
+
# echo "vllm serve qwq".
|
| 29 |
+
|
| 30 |
+
# export CUDA_VISIBLE_DEVICES=0,1
|
| 31 |
+
# nohup vllm serve /capacity/userdata/models/QwQ-32B \
|
| 32 |
+
# --tensor-parallel-size=2 \
|
| 33 |
+
# --gpu-memory-utilization 0.95 \
|
| 34 |
+
# --port 8003 > /opt/aps/workdir/sunshuang/deep_search/search_o1/sft_logs/vllm_serve_qwq_1.log 2>&1 &
|
| 35 |
+
|
| 36 |
+
# # echo "finish vllm serve qwq"
|
| 37 |
+
|
| 38 |
+
# sleep 300
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_qwq.py ${model_save_path} > 5-5_${dataset}_${model_name}.log 2>&1 &
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_qwq_1.py ${model_save_path} > 4-25_${dataset}_doc_by_itself_${model_name}_1.log 2>&1 &
|
| 46 |
+
|
| 47 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_qwq_1.py /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math > 4-25_no_error_data_871_doc_by_itself_QwQ-32B_1.log 2>&1 &
|
| 48 |
+
|
| 49 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_qwq.py /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math > 4-25_no_error_data_871_doc_by_itself_QwQ-32B.log 2>&1 &
|
| 50 |
+
|
| 51 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871 > 4-24_no_error_data_871_doc_by_itself_Qwen2.5-7B-Instruct_add_math871_2.log 2>&1 &
|
| 52 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871 > 4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_qwen7b_sft_871_checkpoint-78_add_math871_after_search_2.log 2>&1 &
|
| 53 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft > 3-25-strict_selected_1526_sft_doc_by_itself_Qwen2.5-32B-Instruct_eval.log 2>&1 &
|
| 54 |
+
# /share/project/sunshuang/deep_search/output/JOB:13192#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft
|
| 55 |
+
# /share/project/sunshuang/deep_search/output/JOB:2934#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_strict_selected_1526_sft
|
| 56 |
+
# # nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt_check.py /share/project/sunshuang/deep_search/output/JOB:11783#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_selected_data_1174_sft > 3-26_selected_data_1174_sft_doc_by_itself_QwQ-32B_2.log 2>&1 &
|
| 57 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u wait_eval_use_one_model_for_ckpt.py /capacity/userdata/ss/sft_search/JOB:30386#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_zh_296_sft_1469 > 3-28_merged_1174_zh_296_sft_1469_doc_by_itself_QwQ-32B.log 2>&1 &
|
| 58 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u wait_eval_use_one_model_for_ckpt.py /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829 > 3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B_3.log 2>&1 &
|
| 59 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_worker1.py /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097 > 4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B_3.log 2>&1 &
|
| 60 |
+
# nohup /opt/aps/workdir/search_o1/bin/python3 -u /opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt.py /capacity/userdata/ss/sft_search/JOB:17315#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32#TOKEN:DeepSeek-R1-Distill-Qwen-32#BSZ:2#ACC:4_no_error_data_871 > 4-4_no_error_data_871_doc_by_itself_DeepSeek-R1-Distill-Qwen-32.log 2>&1 &
|
| 61 |
+
|
| 62 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:27033#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_syn_long_359_sft_1533 > 3-27_merged_syn_long_359_sft_1533_doc_by_itself_QwQ-32B_1.log 2>&1 &
|
| 63 |
+
|
| 64 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:2401#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_strict_selected_1526_sft_format_ans > 3-26_strict_selected_1526_sft_format_ans_doc_by_itself_QwQ-32B_1.log 2>&1 &
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:12665#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_new_instruction_1161_1301_sft_remove_exp_2462 > 3-20-merged_2462_remove_exp_doc_by_itself_qwq.log 2>&1 &
|
| 68 |
+
|
| 69 |
+
# nohup /share/project/miniconda/envs/search_o1/bin/python -u wait_eval_use_one_model_for_ckpt.py /share/project/sunshuang/deep_search/output/JOB:487#LR:1e-5#BASE:DeepSeek-R1-Distill-Qwen-32B#TOKEN:DeepSeek-R1-Distill-Qwen-32B#BSZ:2#ACC:4 > 3-12-1200qa_doc_by_itself_r1_32b.log 2>&1 &
|
deep_search/sft/train-inst.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from typing import Optional, Dict, Sequence
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 10 |
+
import transformers
|
| 11 |
+
from torch.utils.data import Dataset
|
| 12 |
+
from transformers import Trainer
|
| 13 |
+
import random
|
| 14 |
+
from typing import List, Optional, Tuple, Union
|
| 15 |
+
from transformers.models.llama.modeling_llama import (
|
| 16 |
+
LlamaForCausalLM,
|
| 17 |
+
LlamaAttention,
|
| 18 |
+
apply_rotary_pos_emb,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
IGNORE_INDEX = -100
|
| 23 |
+
DEFAULT_PAD_TOKEN = "[PAD]"
|
| 24 |
+
DEFAULT_EOS_TOKEN = "</s>"
|
| 25 |
+
DEFAULT_BOS_TOKEN = "<s>"
|
| 26 |
+
DEFAULT_UNK_TOKEN = "[UNK]"
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def apply_template_self(example, tokenizer):
|
| 31 |
+
system_prompt='''You are a helpful assistant. Given a question, you should answer it by first thinking about the reasoning process in the mind and then providing the final answer. The output format of reasoning process and final answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., "<think> reasoning process here </think>\n\n<answer> final answer here </answer>". During the thinking process, you can perform searching for uncertain knowledge if necessary with the format of "<|begin_of_query|> search query (only keywords) here <|end_of_query|>". Then, the system will provide the Assistant with helpful information with the format of "<|begin_of_documents|> ...search results... <|end_of_documents|>"'''
|
| 32 |
+
|
| 33 |
+
prompt = tokenizer.apply_chat_template(
|
| 34 |
+
[
|
| 35 |
+
{"role": "system", "content": system_prompt},
|
| 36 |
+
{"role": "user", "content": example},
|
| 37 |
+
],
|
| 38 |
+
tokenize=False,
|
| 39 |
+
add_generation_prompt=True,
|
| 40 |
+
)
|
| 41 |
+
# print(prompt)
|
| 42 |
+
return prompt
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def apply_template(example, tokenizer):
|
| 48 |
+
system_prompt="You are skilled at solving problems through step-by-step reasoning, and you can choose different actions to address the problems."
|
| 49 |
+
user_prompt='''Use the following five actions to solve the problem step by step:
|
| 50 |
+
<Sub-question Decompose>: Plan the next sub-question that needs to be solved.
|
| 51 |
+
<Sub-question Answer>: Directly answer or utilize the retrieved documents to answer a sub-question.
|
| 52 |
+
<Search>: Utilize external the external Search Engine to retrieve knowledge. **When calling the search engine, you need to encapsulate the query keywords within <|begin_of_query|><|end_of_query|> tags**.
|
| 53 |
+
<Reflect>: Reflect on past actions and their outcomes.
|
| 54 |
+
<Final Answer>: Obtain the final answer to the original complex question, and **output the final answer in \\boxed{{}}**.
|
| 55 |
+
|
| 56 |
+
## Question
|
| 57 |
+
{question}'''
|
| 58 |
+
|
| 59 |
+
prompt = tokenizer.apply_chat_template(
|
| 60 |
+
[
|
| 61 |
+
{"role": "system", "content": system_prompt},
|
| 62 |
+
{"role": "user", "content": user_prompt.replace("{question}",example)},
|
| 63 |
+
],
|
| 64 |
+
tokenize=False,
|
| 65 |
+
add_generation_prompt=True,
|
| 66 |
+
)
|
| 67 |
+
prompt = prompt+"\n## Solution"
|
| 68 |
+
# print(prompt)
|
| 69 |
+
# kill
|
| 70 |
+
return prompt
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@dataclass
|
| 75 |
+
class ModelArguments:
|
| 76 |
+
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
|
| 77 |
+
flash_attention: Optional[bool] = field(default=False)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@dataclass
|
| 81 |
+
class DataArguments:
|
| 82 |
+
data_path: str = field(
|
| 83 |
+
default=None, metadata={"help": "Path to the training data."}
|
| 84 |
+
)
|
| 85 |
+
prompt_type: Optional[str] = field(default="instruction")
|
| 86 |
+
dailog_augmentation: Optional[bool] = field(default=False)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@dataclass
|
| 90 |
+
class TrainingArguments(transformers.TrainingArguments):
|
| 91 |
+
cache_dir: Optional[str] = field(default=None)
|
| 92 |
+
optim: str = field(default="adamw_torch")
|
| 93 |
+
model_max_length: int = field(
|
| 94 |
+
default=512,
|
| 95 |
+
metadata={
|
| 96 |
+
"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
|
| 97 |
+
},
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
|
| 102 |
+
"""Collects the state dict and dump to disk."""
|
| 103 |
+
state_dict = trainer.model.state_dict()
|
| 104 |
+
if trainer.args.should_save:
|
| 105 |
+
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
|
| 106 |
+
del state_dict
|
| 107 |
+
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def smart_tokenizer_and_embedding_resize(
|
| 111 |
+
special_tokens_dict: Dict,
|
| 112 |
+
tokenizer: transformers.PreTrainedTokenizer,
|
| 113 |
+
model: transformers.PreTrainedModel,
|
| 114 |
+
):
|
| 115 |
+
"""Resize tokenizer and embedding.
|
| 116 |
+
|
| 117 |
+
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
|
| 118 |
+
"""
|
| 119 |
+
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
|
| 120 |
+
model.resize_token_embeddings(len(tokenizer))
|
| 121 |
+
|
| 122 |
+
if num_new_tokens > 0:
|
| 123 |
+
input_embeddings = model.get_input_embeddings().weight.data
|
| 124 |
+
output_embeddings = model.get_output_embeddings().weight.data
|
| 125 |
+
|
| 126 |
+
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
|
| 127 |
+
dim=0, keepdim=True
|
| 128 |
+
)
|
| 129 |
+
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
|
| 130 |
+
dim=0, keepdim=True
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
input_embeddings[-num_new_tokens:] = input_embeddings_avg
|
| 134 |
+
output_embeddings[-num_new_tokens:] = output_embeddings_avg
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class SupervisedDataset(Dataset):
|
| 138 |
+
"""Dataset for supervised fine-tuning."""
|
| 139 |
+
|
| 140 |
+
def __init__(
|
| 141 |
+
self,
|
| 142 |
+
data_path: str,
|
| 143 |
+
prompt_type: str,
|
| 144 |
+
tokenizer: transformers.PreTrainedTokenizer,
|
| 145 |
+
):
|
| 146 |
+
super(SupervisedDataset, self).__init__()
|
| 147 |
+
logging.warning("Loading data...")
|
| 148 |
+
# prompt_simple_inference = PROMPT_DICT["alpaca_format"]
|
| 149 |
+
self.sources, self.targets = [], []
|
| 150 |
+
for path in data_path.split(","):
|
| 151 |
+
with open(path, "r") as f:
|
| 152 |
+
for i, line in enumerate(f.readlines()):
|
| 153 |
+
try:
|
| 154 |
+
c = json.loads(line)
|
| 155 |
+
except:
|
| 156 |
+
print(path)
|
| 157 |
+
print(line)
|
| 158 |
+
raise ValueError
|
| 159 |
+
|
| 160 |
+
if "question" in c:
|
| 161 |
+
input_text = c["question"]
|
| 162 |
+
source = apply_template(input_text, tokenizer)
|
| 163 |
+
else:
|
| 164 |
+
source = c["prompt"]
|
| 165 |
+
self.sources.append(source.strip())
|
| 166 |
+
|
| 167 |
+
if "solution" in c:
|
| 168 |
+
output_text = c["solution"]
|
| 169 |
+
else:
|
| 170 |
+
output_text = c["chosen"]
|
| 171 |
+
self.targets.append(source + output_text + tokenizer.eos_token)
|
| 172 |
+
print("Train Dataset Length: ",len(self.sources))
|
| 173 |
+
|
| 174 |
+
def __len__(self):
|
| 175 |
+
return len(self.sources)
|
| 176 |
+
|
| 177 |
+
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
|
| 178 |
+
return dict(input_ids=self.sources[i], labels=self.targets[i])
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
@dataclass
|
| 182 |
+
class DataCollatorForSupervisedDataset(object):
|
| 183 |
+
"""Collate examples for supervised fine-tuning."""
|
| 184 |
+
|
| 185 |
+
data_args: DataArguments
|
| 186 |
+
tokenizer: transformers.PreTrainedTokenizer
|
| 187 |
+
|
| 188 |
+
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
|
| 189 |
+
# print(instances)
|
| 190 |
+
# print("======="*30)
|
| 191 |
+
|
| 192 |
+
inputs = self.tokenizer(
|
| 193 |
+
text=[instance["labels"] for instance in instances],
|
| 194 |
+
text_target=[instance["input_ids"] for instance in instances],
|
| 195 |
+
return_tensors="pt",
|
| 196 |
+
padding="longest",
|
| 197 |
+
max_length=self.tokenizer.model_max_length,
|
| 198 |
+
truncation=True,
|
| 199 |
+
return_attention_mask=True,
|
| 200 |
+
)
|
| 201 |
+
labels = copy.deepcopy(inputs["input_ids"])
|
| 202 |
+
labels[labels == self.tokenizer.pad_token_id] = IGNORE_INDEX
|
| 203 |
+
labels[torch.where(inputs["labels"] != self.tokenizer.pad_token_id)] = (
|
| 204 |
+
IGNORE_INDEX
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
inputs["labels"] = labels
|
| 208 |
+
|
| 209 |
+
for k in range(len(labels)):
|
| 210 |
+
label = torch.clone(inputs['labels'][k])
|
| 211 |
+
input_id = torch.clone(inputs['input_ids'][k])
|
| 212 |
+
attention_mask = torch.clone(inputs['attention_mask'][k])
|
| 213 |
+
# print("Label:",label.tolist())
|
| 214 |
+
# print("Input-ID:",input_id.tolist())
|
| 215 |
+
|
| 216 |
+
start_tokens = torch.tensor([27, 91, 7265, 3575, 75927, 91, 397]) # <|begin_of_documents|>\n
|
| 217 |
+
end_tokens = torch.tensor([408, 3575, 75927, 91, 1339])
|
| 218 |
+
|
| 219 |
+
is_in_masking = False
|
| 220 |
+
mask_start_idx = -1
|
| 221 |
+
start_count = 0
|
| 222 |
+
end_count = 0
|
| 223 |
+
for m in range(len(input_id)):
|
| 224 |
+
if label[m] == 0:
|
| 225 |
+
continue
|
| 226 |
+
|
| 227 |
+
if not is_in_masking and m + len(start_tokens) <= len(input_id):
|
| 228 |
+
if torch.equal(input_id[m:m + len(start_tokens)], start_tokens):
|
| 229 |
+
start_count += 1
|
| 230 |
+
is_in_masking = True
|
| 231 |
+
mask_start_idx = m
|
| 232 |
+
label[m:m + len(start_tokens)] = -100
|
| 233 |
+
|
| 234 |
+
if is_in_masking:
|
| 235 |
+
label[m] = -100
|
| 236 |
+
if m + len(end_tokens) <= len(input_id):
|
| 237 |
+
if torch.equal(input_id[m:m + len(end_tokens)], end_tokens):
|
| 238 |
+
end_count += 1 # 增加 end_tokens 计数
|
| 239 |
+
is_in_masking = False # 结束 mask
|
| 240 |
+
label[m:m + len(end_tokens)] = -100
|
| 241 |
+
mask_start_idx = -1
|
| 242 |
+
if start_count != end_count:
|
| 243 |
+
print("Important Error!",start_count,end_count)
|
| 244 |
+
raise ValueError("128017 and 128018 must be in pairs.")
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
inputs['labels'][k] = label
|
| 248 |
+
inputs['input_ids'][k] = input_id
|
| 249 |
+
inputs['attention_mask'][k] = attention_mask
|
| 250 |
+
|
| 251 |
+
torch.set_printoptions(profile="full")
|
| 252 |
+
# print(inputs)
|
| 253 |
+
|
| 254 |
+
# exit()
|
| 255 |
+
|
| 256 |
+
return inputs
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def make_supervised_data_module(
|
| 260 |
+
tokenizer: transformers.PreTrainedTokenizer, data_args
|
| 261 |
+
) -> Dict:
|
| 262 |
+
"""Make dataset and collator for supervised fine-tuning."""
|
| 263 |
+
train_dataset = SupervisedDataset(
|
| 264 |
+
tokenizer=tokenizer,
|
| 265 |
+
prompt_type=data_args.prompt_type,
|
| 266 |
+
data_path=data_args.data_path,
|
| 267 |
+
)
|
| 268 |
+
print(train_dataset)
|
| 269 |
+
data_collator = DataCollatorForSupervisedDataset(
|
| 270 |
+
data_args=data_args, tokenizer=tokenizer
|
| 271 |
+
)
|
| 272 |
+
return dict(
|
| 273 |
+
train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def train():
|
| 278 |
+
parser = transformers.HfArgumentParser(
|
| 279 |
+
(ModelArguments, DataArguments, TrainingArguments)
|
| 280 |
+
)
|
| 281 |
+
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
| 282 |
+
|
| 283 |
+
model = transformers.AutoModelForCausalLM.from_pretrained(
|
| 284 |
+
model_args.model_name_or_path,
|
| 285 |
+
cache_dir=training_args.cache_dir,
|
| 286 |
+
)
|
| 287 |
+
|
| 288 |
+
tokenizer = transformers.AutoTokenizer.from_pretrained(
|
| 289 |
+
model_args.model_name_or_path,
|
| 290 |
+
cache_dir=training_args.cache_dir,
|
| 291 |
+
model_max_length=training_args.model_max_length,
|
| 292 |
+
padding_side="right",
|
| 293 |
+
)
|
| 294 |
+
|
| 295 |
+
if "llama" in model_args.model_name_or_path.lower():
|
| 296 |
+
tokenizer.pad_token=tokenizer.eos_token
|
| 297 |
+
|
| 298 |
+
data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
|
| 299 |
+
|
| 300 |
+
trainer = Trainer(
|
| 301 |
+
model=model, tokenizer=tokenizer, args=training_args, **data_module
|
| 302 |
+
)
|
| 303 |
+
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
|
| 304 |
+
trainer.save_state()
|
| 305 |
+
trainer.save_model(output_dir=training_args.output_dir)
|
| 306 |
+
# safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
if __name__ == "__main__":
|
| 310 |
+
print("222222")
|
| 311 |
+
train()
|
deep_search/sft/train-inst.sh
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
# cd /opt/aps/workdir/songhuatong
|
| 4 |
+
PWD=/opt/aps/workdir/sht-RAG_SFT/resultes
|
| 5 |
+
|
| 6 |
+
# source /opt/aps/workdir/songhuatong/.venv/bin/activate
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
BS=$1
|
| 10 |
+
ACC=$2
|
| 11 |
+
LR=$3
|
| 12 |
+
WARM_UP_RATIO=$4
|
| 13 |
+
DATA_PATH=$5
|
| 14 |
+
|
| 15 |
+
SCRIPT_PATH=/opt/aps/workdir/sht-RAG_SFT/train-inst.py
|
| 16 |
+
MODEL_PATH=/opt/aps/workdir/model/Qwen2.5-7B-Instruct
|
| 17 |
+
DATA_PREFIX=$(basename $DATA_PATH .jsonl)
|
| 18 |
+
JOB_NAME=qwen-2_5-7B-inst_${DATA_PREFIX}_${BS}_${ACC}_${LR}_${WARM_UP_RATIO}
|
| 19 |
+
SAVE_DIR=${PWD}/model
|
| 20 |
+
|
| 21 |
+
export OMP_NUM_THREADS=24
|
| 22 |
+
export WANDB_MODE=offline
|
| 23 |
+
export CUDA_VISIBLE_DEVICES=4,5,6,7
|
| 24 |
+
echo 1111
|
| 25 |
+
echo ${SCRIPT_PATH}
|
| 26 |
+
torchrun --nproc_per_node 4 \
|
| 27 |
+
--nnodes 1 \
|
| 28 |
+
--node_rank 0 \
|
| 29 |
+
--master_addr lmlabide-6427f7a2-728d-4097-a672-471aaf8c507a-worker-0 \
|
| 30 |
+
--master_port 1111 \
|
| 31 |
+
/opt/aps/workdir/sht-RAG_SFT/train-inst.py \
|
| 32 |
+
--model_name_or_path ${MODEL_PATH} \
|
| 33 |
+
--data_path ${DATA_PATH} \
|
| 34 |
+
--bf16 True \
|
| 35 |
+
--output_dir ${SAVE_DIR}/${JOB_NAME} \
|
| 36 |
+
--num_train_epochs 3 \
|
| 37 |
+
--per_device_train_batch_size ${BS} \
|
| 38 |
+
--per_device_eval_batch_size 1 \
|
| 39 |
+
--gradient_accumulation_steps ${ACC} \
|
| 40 |
+
--evaluation_strategy "no" \
|
| 41 |
+
--save_strategy "epoch" \
|
| 42 |
+
--save_total_limit 1 \
|
| 43 |
+
--learning_rate ${LR} \
|
| 44 |
+
--weight_decay 0. \
|
| 45 |
+
--warmup_ratio ${WARM_UP_RATIO} \
|
| 46 |
+
--lr_scheduler_type "cosine" \
|
| 47 |
+
--logging_steps 1 \
|
| 48 |
+
--model_max_length 8192 \
|
| 49 |
+
--deepspeed /opt/aps/workdir/sht-RAG_SFT/dp_3.jsonl \
|
| 50 |
+
--gradient_checkpointing True \
|
| 51 |
+
--report_to none \
|
| 52 |
+
--tf32 True \
|
| 53 |
+
# &> /home/songhuatong/SFT/logs/${JOB_NAME}.log
|
| 54 |
+
|
| 55 |
+
# 四张卡
|
| 56 |
+
# bash /opt/aps/workdir/sht-RAG_SFT/train-inst.sh 2 4 2e-5 0.1 /opt/aps/workdir/sht-RAG_RL/datasets/rollout/sft_data/mixed_sft.jsonl
|
deep_search/sft/train_env.yml
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: ss_train
|
| 2 |
+
channels:
|
| 3 |
+
- https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge
|
| 4 |
+
- conda-forge
|
| 5 |
+
dependencies:
|
| 6 |
+
- _libgcc_mutex=0.1=conda_forge
|
| 7 |
+
- _openmp_mutex=4.5=2_gnu
|
| 8 |
+
- bzip2=1.0.8=h4bc722e_7
|
| 9 |
+
- ca-certificates=2025.1.31=hbcca054_0
|
| 10 |
+
- ld_impl_linux-64=2.43=h712a8e2_2
|
| 11 |
+
- libexpat=2.6.4=h5888daf_0
|
| 12 |
+
- libffi=3.4.6=h2dba641_0
|
| 13 |
+
- libgcc=14.2.0=h77fa898_1
|
| 14 |
+
- libgcc-ng=14.2.0=h69a702a_1
|
| 15 |
+
- libgomp=14.2.0=h77fa898_1
|
| 16 |
+
- liblzma=5.6.4=hb9d3cd8_0
|
| 17 |
+
- libnsl=2.0.1=hd590300_0
|
| 18 |
+
- libsqlite=3.49.1=hee588c1_1
|
| 19 |
+
- libuuid=2.38.1=h0b41bf4_0
|
| 20 |
+
- libxcrypt=4.4.36=hd590300_1
|
| 21 |
+
- libzlib=1.3.1=hb9d3cd8_2
|
| 22 |
+
- ncurses=6.5=h2d0b736_3
|
| 23 |
+
- openssl=3.4.1=h7b32b05_0
|
| 24 |
+
- pip=25.0.1=pyh8b19718_0
|
| 25 |
+
- python=3.11.11=h9e4cc4f_1_cpython
|
| 26 |
+
- readline=8.2=h8228510_1
|
| 27 |
+
- setuptools=75.8.0=pyhff2d567_0
|
| 28 |
+
- tk=8.6.13=noxft_h4845f30_101
|
| 29 |
+
- wheel=0.45.1=pyhd8ed1ab_1
|
| 30 |
+
- pip:
|
| 31 |
+
- accelerate==0.26.0
|
| 32 |
+
- aiofiles==23.2.1
|
| 33 |
+
- aiohappyeyeballs==2.4.6
|
| 34 |
+
- aiohttp==3.11.12
|
| 35 |
+
- aiosignal==1.3.2
|
| 36 |
+
- altair==5.5.0
|
| 37 |
+
- anyio==4.8.0
|
| 38 |
+
- attrs==25.1.0
|
| 39 |
+
- certifi==2025.1.31
|
| 40 |
+
- charset-normalizer==3.4.1
|
| 41 |
+
- click==8.1.8
|
| 42 |
+
- contourpy==1.3.1
|
| 43 |
+
- cycler==0.12.1
|
| 44 |
+
- datasets==2.15.0
|
| 45 |
+
- deepspeed==0.14.5
|
| 46 |
+
- dill==0.3.7
|
| 47 |
+
- einops==0.8.1
|
| 48 |
+
- fastapi==0.95.1
|
| 49 |
+
- ffmpy==0.5.0
|
| 50 |
+
- filelock==3.17.0
|
| 51 |
+
- fire==0.7.0
|
| 52 |
+
- flash-attn==2.7.1.post1
|
| 53 |
+
- fonttools==4.56.0
|
| 54 |
+
- frozenlist==1.5.0
|
| 55 |
+
- fsspec==2023.10.0
|
| 56 |
+
- gputil==1.4.0
|
| 57 |
+
- gradio==3.38.0
|
| 58 |
+
- gradio-client==1.7.1
|
| 59 |
+
- h11==0.14.0
|
| 60 |
+
- hjson==3.1.0
|
| 61 |
+
- httpcore==1.0.7
|
| 62 |
+
- httpx==0.28.1
|
| 63 |
+
- huggingface-hub==0.29.1
|
| 64 |
+
- idna==3.10
|
| 65 |
+
- jieba==0.42.1
|
| 66 |
+
- jinja2==3.1.5
|
| 67 |
+
- joblib==1.4.2
|
| 68 |
+
- jsonschema==4.23.0
|
| 69 |
+
- jsonschema-specifications==2024.10.1
|
| 70 |
+
- kiwisolver==1.4.8
|
| 71 |
+
- linkify-it-py==2.0.3
|
| 72 |
+
- markdown-it-py==2.2.0
|
| 73 |
+
- markupsafe==2.1.5
|
| 74 |
+
- matplotlib==3.10.0
|
| 75 |
+
- mdit-py-plugins==0.3.3
|
| 76 |
+
- mdurl==0.1.2
|
| 77 |
+
- mpmath==1.3.0
|
| 78 |
+
- multidict==6.1.0
|
| 79 |
+
- multiprocess==0.70.15
|
| 80 |
+
- narwhals==1.27.1
|
| 81 |
+
- networkx==3.4.2
|
| 82 |
+
- ninja==1.11.1.3
|
| 83 |
+
- nltk==3.9.1
|
| 84 |
+
- numpy==1.26.4
|
| 85 |
+
- nvidia-cublas-cu12==12.1.3.1
|
| 86 |
+
- nvidia-cuda-cupti-cu12==12.1.105
|
| 87 |
+
- nvidia-cuda-nvrtc-cu12==12.1.105
|
| 88 |
+
- nvidia-cuda-runtime-cu12==12.1.105
|
| 89 |
+
- nvidia-cudnn-cu12==8.9.2.26
|
| 90 |
+
- nvidia-cufft-cu12==11.0.2.54
|
| 91 |
+
- nvidia-curand-cu12==10.3.2.106
|
| 92 |
+
- nvidia-cusolver-cu12==11.4.5.107
|
| 93 |
+
- nvidia-cusparse-cu12==12.1.0.106
|
| 94 |
+
- nvidia-ml-py==12.570.86
|
| 95 |
+
- nvidia-nccl-cu12==2.18.1
|
| 96 |
+
- nvidia-nvjitlink-cu12==12.8.61
|
| 97 |
+
- nvidia-nvtx-cu12==12.1.105
|
| 98 |
+
- orjson==3.10.15
|
| 99 |
+
- packaging==24.2
|
| 100 |
+
- pandas==2.2.3
|
| 101 |
+
- peft==0.6.1
|
| 102 |
+
- pillow==10.4.0
|
| 103 |
+
- propcache==0.3.0
|
| 104 |
+
- protobuf==5.29.3
|
| 105 |
+
- psutil==7.0.0
|
| 106 |
+
- py-cpuinfo==9.0.0
|
| 107 |
+
- pyarrow==19.0.1
|
| 108 |
+
- pyarrow-hotfix==0.6
|
| 109 |
+
- pydantic==1.10.11
|
| 110 |
+
- pydub==0.25.1
|
| 111 |
+
- pyparsing==3.2.1
|
| 112 |
+
- python-dateutil==2.9.0.post0
|
| 113 |
+
- python-multipart==0.0.20
|
| 114 |
+
- pytz==2025.1
|
| 115 |
+
- pyyaml==6.0.2
|
| 116 |
+
- referencing==0.36.2
|
| 117 |
+
- regex==2024.11.6
|
| 118 |
+
- requests==2.32.3
|
| 119 |
+
- rouge-chinese==1.0.3
|
| 120 |
+
- rpds-py==0.23.1
|
| 121 |
+
- safetensors==0.5.2
|
| 122 |
+
- scipy==1.15.2
|
| 123 |
+
- semantic-version==2.10.0
|
| 124 |
+
- sentencepiece==0.2.0
|
| 125 |
+
- six==1.17.0
|
| 126 |
+
- sniffio==1.3.1
|
| 127 |
+
- sse-starlette==2.1.3
|
| 128 |
+
- starlette==0.26.1
|
| 129 |
+
- sympy==1.13.3
|
| 130 |
+
- termcolor==2.5.0
|
| 131 |
+
- tiktoken==0.9.0
|
| 132 |
+
- tokenizers==0.20.3
|
| 133 |
+
- torch==2.1.0
|
| 134 |
+
- tqdm==4.67.1
|
| 135 |
+
- transformers==4.46.0
|
| 136 |
+
- triton==2.1.0
|
| 137 |
+
- typing-extensions==4.12.2
|
| 138 |
+
- tzdata==2025.1
|
| 139 |
+
- uc-micro-py==1.0.3
|
| 140 |
+
- urllib3==2.3.0
|
| 141 |
+
- uvicorn==0.34.0
|
| 142 |
+
- websockets==11.0.3
|
| 143 |
+
- xxhash==3.5.0
|
| 144 |
+
- yarl==1.18.3
|
| 145 |
+
prefix: /opt/aps/workdir/miniforge3/envs/train
|
deep_search/sft/train_requirements.txt
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
torch==2.1.0
|
| 2 |
+
transformers==4.46.0
|
| 3 |
+
accelerate==0.26.0
|
| 4 |
+
aiofiles==23.2.1
|
| 5 |
+
aiohappyeyeballs==2.4.6
|
| 6 |
+
aiohttp==3.11.12
|
| 7 |
+
aiosignal==1.3.2
|
| 8 |
+
altair==5.5.0
|
| 9 |
+
anyio==4.8.0
|
| 10 |
+
attrs==25.1.0
|
| 11 |
+
certifi==2025.1.31
|
| 12 |
+
charset-normalizer==3.4.1
|
| 13 |
+
click==8.1.8
|
| 14 |
+
contourpy==1.3.1
|
| 15 |
+
cycler==0.12.1
|
| 16 |
+
datasets==2.15.0
|
| 17 |
+
deepspeed==0.14.5
|
| 18 |
+
dill==0.3.7
|
| 19 |
+
einops==0.8.1
|
| 20 |
+
fastapi==0.95.1
|
| 21 |
+
ffmpy==0.5.0
|
| 22 |
+
filelock==3.17.0
|
| 23 |
+
fire==0.7.0
|
| 24 |
+
# flash-attn==2.7.1.post4
|
| 25 |
+
fonttools==4.56.0
|
| 26 |
+
frozenlist==1.5.0
|
| 27 |
+
fsspec==2023.10.0
|
| 28 |
+
gputil==1.4.0
|
| 29 |
+
gradio==3.38.0
|
| 30 |
+
gradio-client==1.7.1
|
| 31 |
+
h11==0.14.0
|
| 32 |
+
hjson==3.1.0
|
| 33 |
+
httpcore==1.0.7
|
| 34 |
+
httpx==0.28.1
|
| 35 |
+
huggingface-hub==0.29.1
|
| 36 |
+
idna==3.10
|
| 37 |
+
jieba==0.42.1
|
| 38 |
+
jinja2==3.1.5
|
| 39 |
+
joblib==1.4.2
|
| 40 |
+
jsonschema==4.23.0
|
| 41 |
+
jsonschema-specifications==2024.10.1
|
| 42 |
+
kiwisolver==1.4.8
|
| 43 |
+
linkify-it-py==2.0.3
|
| 44 |
+
markdown-it-py==2.2.0
|
| 45 |
+
markupsafe==2.1.5
|
| 46 |
+
matplotlib==3.10.0
|
| 47 |
+
mdit-py-plugins==0.3.3
|
| 48 |
+
mdurl==0.1.2
|
| 49 |
+
mpmath==1.3.0
|
| 50 |
+
multidict==6.1.0
|
| 51 |
+
multiprocess==0.70.15
|
| 52 |
+
narwhals==1.27.1
|
| 53 |
+
networkx==3.4.2
|
| 54 |
+
ninja==1.11.1.3
|
| 55 |
+
nltk==3.9.1
|
| 56 |
+
numpy==1.26.4
|
| 57 |
+
nvidia-cublas-cu12==12.1.3.1
|
| 58 |
+
nvidia-cuda-cupti-cu12==12.1.105
|
| 59 |
+
nvidia-cuda-nvrtc-cu12==12.1.105
|
| 60 |
+
nvidia-cuda-runtime-cu12==12.1.105
|
| 61 |
+
nvidia-cudnn-cu12==8.9.2.26
|
| 62 |
+
nvidia-cufft-cu12==11.0.2.54
|
| 63 |
+
nvidia-curand-cu12==10.3.2.106
|
| 64 |
+
nvidia-cusolver-cu12==11.4.5.107
|
| 65 |
+
nvidia-cusparse-cu12==12.1.0.106
|
| 66 |
+
nvidia-ml-py==12.570.86
|
| 67 |
+
nvidia-nccl-cu12==2.18.1
|
| 68 |
+
nvidia-nvjitlink-cu12==12.8.61
|
| 69 |
+
nvidia-nvtx-cu12==12.1.105
|
| 70 |
+
orjson==3.10.15
|
| 71 |
+
packaging==24.2
|
| 72 |
+
pandas==2.2.3
|
| 73 |
+
peft==0.6.1
|
| 74 |
+
pillow==10.4.0
|
| 75 |
+
propcache==0.3.0
|
| 76 |
+
protobuf==5.29.3
|
| 77 |
+
psutil==7.0.0
|
| 78 |
+
py-cpuinfo==9.0.0
|
| 79 |
+
pyarrow==19.0.1
|
| 80 |
+
pyarrow-hotfix==0.6
|
| 81 |
+
pydantic==1.10.11
|
| 82 |
+
pydub==0.25.1
|
| 83 |
+
pyparsing==3.2.1
|
| 84 |
+
python-dateutil==2.9.0.post0
|
| 85 |
+
python-multipart==0.0.20
|
| 86 |
+
pytz==2025.1
|
| 87 |
+
pyyaml==6.0.2
|
| 88 |
+
referencing==0.36.2
|
| 89 |
+
regex==2024.11.6
|
| 90 |
+
requests==2.32.3
|
| 91 |
+
rouge-chinese==1.0.3
|
| 92 |
+
rpds-py==0.23.1
|
| 93 |
+
safetensors==0.5.2
|
| 94 |
+
scipy==1.15.2
|
| 95 |
+
semantic-version==2.10.0
|
| 96 |
+
sentencepiece==0.2.0
|
| 97 |
+
six==1.17.0
|
| 98 |
+
sniffio==1.3.1
|
| 99 |
+
sse-starlette==2.1.3
|
| 100 |
+
starlette==0.26.1
|
| 101 |
+
sympy==1.13.3
|
| 102 |
+
termcolor==2.5.0
|
| 103 |
+
tiktoken==0.9.0
|
| 104 |
+
tokenizers==0.20.3
|
| 105 |
+
|
| 106 |
+
tqdm==4.67.1
|
| 107 |
+
|
| 108 |
+
triton==2.1.0
|
| 109 |
+
typing-extensions==4.12.2
|
| 110 |
+
tzdata==2025.1
|
| 111 |
+
uc-micro-py==1.0.3
|
| 112 |
+
urllib3==2.3.0
|
| 113 |
+
uvicorn==0.34.0
|
| 114 |
+
websockets==11.0.3
|
| 115 |
+
xxhash==3.5.0
|
| 116 |
+
yarl==1.18.3
|
deep_search/sft/wait_eval_1.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from time import sleep
|
| 3 |
+
import datetime
|
| 4 |
+
import subprocess
|
| 5 |
+
import json
|
| 6 |
+
from queue import Queue
|
| 7 |
+
import GPUtil
|
| 8 |
+
import sys
|
| 9 |
+
|
| 10 |
+
wait_cnt = 0
|
| 11 |
+
SLEEP_TIME = 120
|
| 12 |
+
NEED_REVERSE = False
|
| 13 |
+
eppt = None
|
| 14 |
+
NEED_ATLAS = False
|
| 15 |
+
only_rank = False
|
| 16 |
+
commands = Queue()
|
| 17 |
+
|
| 18 |
+
# print(f"wait 120s")
|
| 19 |
+
# sleep(120)
|
| 20 |
+
|
| 21 |
+
def get_free_gpu(threshold=0.95):
|
| 22 |
+
# 获取所有可用的GPU设备
|
| 23 |
+
gpus = GPUtil.getGPUs()
|
| 24 |
+
# 筛选出显存空闲率高于阈值的GPU
|
| 25 |
+
available_gpus = [gpu.id for gpu in gpus if gpu.memoryFree > 80000]
|
| 26 |
+
print(f"available_gpus: {available_gpus}")
|
| 27 |
+
return available_gpus
|
| 28 |
+
|
| 29 |
+
def run(commands):
|
| 30 |
+
gpus = get_free_gpu()
|
| 31 |
+
if NEED_REVERSE:
|
| 32 |
+
gpus.reverse()
|
| 33 |
+
while not commands.empty() and len(gpus) >= 2:
|
| 34 |
+
command = commands.get()
|
| 35 |
+
print(f"The following command is about to run:\n{command.format(device=f'{gpus[0]},{gpus[1]}')}")
|
| 36 |
+
subprocess.Popen(command.format(device=f'{gpus[0]},{gpus[1]}'), shell=True, start_new_session=True)
|
| 37 |
+
gpus = gpus[2:]
|
| 38 |
+
sleep(SLEEP_TIME)
|
| 39 |
+
return commands
|
| 40 |
+
|
| 41 |
+
def main():
|
| 42 |
+
global commands, wait_cnt, eppt, end_ckpt, only_rank
|
| 43 |
+
eppt = ckpt
|
| 44 |
+
math_datasets = [
|
| 45 |
+
'AIME24',
|
| 46 |
+
]
|
| 47 |
+
qa_datasets = [
|
| 48 |
+
'aime', 'hotpotqa','2wiki', 'musique', "amc", "math500"
|
| 49 |
+
]
|
| 50 |
+
|
| 51 |
+
ckpt_dir = f"{ckpt}"
|
| 52 |
+
|
| 53 |
+
all_items = os.listdir(ckpt_dir)
|
| 54 |
+
step_list = []
|
| 55 |
+
for item in all_items:
|
| 56 |
+
if item.startswith("checkpoint-"):
|
| 57 |
+
try:
|
| 58 |
+
step = int(item.split("-")[1])
|
| 59 |
+
step_list.append(step)
|
| 60 |
+
except ValueError:
|
| 61 |
+
continue
|
| 62 |
+
|
| 63 |
+
step_list.sort()
|
| 64 |
+
|
| 65 |
+
for step in step_list:
|
| 66 |
+
checkpoint_dir = f"{ckpt}/checkpoint-{step}"
|
| 67 |
+
export_dir = checkpoint_dir
|
| 68 |
+
wait_cnt = 0
|
| 69 |
+
print(f"Waiting for checkpoint ({checkpoint_dir}) to exist...")
|
| 70 |
+
|
| 71 |
+
while not os.path.exists(os.path.join(checkpoint_dir, "special_tokens_map.json")):
|
| 72 |
+
commands = run(commands)
|
| 73 |
+
wait_cnt += 1
|
| 74 |
+
print(f"Already waiting {datetime.timedelta(seconds=SLEEP_TIME*wait_cnt)}.")
|
| 75 |
+
if wait_cnt > (86400 // SLEEP_TIME):
|
| 76 |
+
clear()
|
| 77 |
+
raise Exception("There have been no new checkpoints for no less than a day, and the program is about to automatically exit.")
|
| 78 |
+
# python3 = "/opt/aps/workdir/miniforge3/envs/search_o1/bin/python3"
|
| 79 |
+
python3 = "/opt/aps/workdir/miniforge3/envs/search_o1/bin/python"
|
| 80 |
+
# /opt/aps/workdir/miniforge3/envs/search_o1/bin/python
|
| 81 |
+
print("The checkpoint exists. Waiting for running...")
|
| 82 |
+
for dataset in qa_datasets:
|
| 83 |
+
base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 84 |
+
dir = "/opt/aps/workdir/sunshuang/search_o1"
|
| 85 |
+
# cache_path = f"{dir}/cache_reason_two_model/eval_reason_two_model/qwen-instruct-32B/{base_name}"
|
| 86 |
+
cache_path = f"{dir}/cache_reason_two_model/eval_reason_two_model"
|
| 87 |
+
output_path = f"{dir}/outputs_reason_two_model/eval_reason_two_model/qwen-instruct-32B/{base_name}"
|
| 88 |
+
log_path = f"{dir}/logs/eval_reason_two_model/qwen-instruct-32B/{base_name}"
|
| 89 |
+
os.makedirs(cache_path, exist_ok=True)
|
| 90 |
+
os.makedirs(output_path, exist_ok=True)
|
| 91 |
+
os.makedirs(log_path, exist_ok=True)
|
| 92 |
+
contents = os.listdir(output_path)
|
| 93 |
+
|
| 94 |
+
subset_num = 100
|
| 95 |
+
if dataset == "math500":
|
| 96 |
+
subset_num = 500
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
if not contents:
|
| 100 |
+
command = (
|
| 101 |
+
"export http_proxy=http://127.0.0.1:7880 && "
|
| 102 |
+
"export https_proxy=http://127.0.0.1:7880 && "
|
| 103 |
+
"cd /opt/aps/workdir/sunshuang/search_o1/ && "
|
| 104 |
+
"CUDA_VISIBLE_DEVICES={device} "
|
| 105 |
+
f"nohup {python3} -u scripts/reason_two_model_5.py "
|
| 106 |
+
f"--dataset_name {dataset} "
|
| 107 |
+
f"--cache_dir_base {cache_path} "
|
| 108 |
+
f"--output_dir_base {output_path} "
|
| 109 |
+
"--split test "
|
| 110 |
+
"--max_search_limit 5 "
|
| 111 |
+
"--max_turn 10 "
|
| 112 |
+
"--top_k 5 "
|
| 113 |
+
"--max_doc_len 3000 "
|
| 114 |
+
f"--subset_num {subset_num} "
|
| 115 |
+
f"--model_path {checkpoint_dir} "
|
| 116 |
+
"--model_doc_reason_path \"/capacity/userdata/models/Qwen2.5-32B-Instruct\" "
|
| 117 |
+
"--jina_api_key \"jina_04d684ee4cc54d2ebe7c43bb7ad4aff0qlkdZGwm14NFBtm5BDkgK9KNf6vQ\" "
|
| 118 |
+
"--bing_subscription_key \"cb0d28279a826d7e5cf22d71f683c77ffd4ba27d\" "
|
| 119 |
+
"--bing_endpoint \"https://google.serper.dev/search\" "
|
| 120 |
+
"--openai_api_base \"http://localhost:8001/v1\" "
|
| 121 |
+
f"> {log_path}/inf.log 2>&1 &"
|
| 122 |
+
)
|
| 123 |
+
commands.put(command)
|
| 124 |
+
else:
|
| 125 |
+
print(f"skip evaluated model: {base_name}")
|
| 126 |
+
|
| 127 |
+
continue
|
| 128 |
+
|
| 129 |
+
for dataset in math_datasets:
|
| 130 |
+
dir = "/opt/aps/workdir/math/evaluation"
|
| 131 |
+
base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 132 |
+
output_path = f"{dir}/outputs/{base_name}"
|
| 133 |
+
log_path = f"{dir}/logs/{base_name}"
|
| 134 |
+
os.makedirs(output_path, exist_ok=True)
|
| 135 |
+
os.makedirs(log_path, exist_ok=True)
|
| 136 |
+
contents = os.listdir(output_path)
|
| 137 |
+
if not contents:
|
| 138 |
+
command = (
|
| 139 |
+
"cd /opt/aps/workdir/math/evaluation && "
|
| 140 |
+
"CUDA_VISIBLE_DEVICES={device} "
|
| 141 |
+
f"nohup {python3} -u run_eval_32b.py "
|
| 142 |
+
f"--data_name {dataset} "
|
| 143 |
+
f"--target_path {output_path} "
|
| 144 |
+
f"--model_name_or_path {checkpoint_dir} "
|
| 145 |
+
"--prompt v4 "
|
| 146 |
+
"--max_tokens 8192 "
|
| 147 |
+
"--paralle_size 1 "
|
| 148 |
+
f"> {log_path}/inf.log 2>&1 &"
|
| 149 |
+
)
|
| 150 |
+
commands.put(command)
|
| 151 |
+
else:
|
| 152 |
+
print(f"skip evaluated model: {base_name}")
|
| 153 |
+
|
| 154 |
+
def clear():
|
| 155 |
+
print("All checkpoints exist. Wait for runing...")
|
| 156 |
+
global commands
|
| 157 |
+
while not commands.empty():
|
| 158 |
+
# sleep(SLEEP_TIME)
|
| 159 |
+
commands = run(commands)
|
| 160 |
+
print("Wish me good luck!")
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
if __name__ == '__main__':
|
| 164 |
+
# print(sys.argv[1])
|
| 165 |
+
ckpt = sys.argv[1]
|
| 166 |
+
main()
|
| 167 |
+
clear()
|
deep_search/sft/wait_eval_use_one_model_for_ckpt.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from time import sleep
|
| 3 |
+
import datetime
|
| 4 |
+
import subprocess
|
| 5 |
+
import json
|
| 6 |
+
from queue import Queue
|
| 7 |
+
import GPUtil
|
| 8 |
+
import sys
|
| 9 |
+
import random
|
| 10 |
+
|
| 11 |
+
wait_cnt = 0
|
| 12 |
+
SLEEP_TIME = 120
|
| 13 |
+
NEED_REVERSE = False
|
| 14 |
+
eppt = None
|
| 15 |
+
NEED_ATLAS = False
|
| 16 |
+
only_rank = False
|
| 17 |
+
commands = Queue()
|
| 18 |
+
|
| 19 |
+
# print(f"wait 120s")
|
| 20 |
+
# sleep(120)
|
| 21 |
+
|
| 22 |
+
def get_free_gpu(threshold=0.95):
|
| 23 |
+
# 获取所有可用的GPU设备
|
| 24 |
+
gpus = GPUtil.getGPUs()
|
| 25 |
+
# 筛选出显存空闲率高于阈值的GPU
|
| 26 |
+
available_gpus = [gpu.id for gpu in gpus if gpu.memoryFree > 80000 ]
|
| 27 |
+
print(f"available_gpus: {available_gpus}")
|
| 28 |
+
return available_gpus
|
| 29 |
+
|
| 30 |
+
def run(commands):
|
| 31 |
+
gpus = get_free_gpu()
|
| 32 |
+
if NEED_REVERSE:
|
| 33 |
+
gpus.reverse()
|
| 34 |
+
# random.shuffle(gpus)
|
| 35 |
+
# print(f"available_gpus: {gpus}")
|
| 36 |
+
while not commands.empty() and len(gpus) >= 2:
|
| 37 |
+
command = commands.get()
|
| 38 |
+
print(f"The following command is about to run:\n{command.format(device=f'{gpus[0]},{gpus[1]}')}")
|
| 39 |
+
subprocess.Popen(command.format(device=f'{gpus[0]},{gpus[1]}'), shell=True, start_new_session=True)
|
| 40 |
+
gpus = gpus[2:]
|
| 41 |
+
sleep(SLEEP_TIME)
|
| 42 |
+
return commands
|
| 43 |
+
|
| 44 |
+
def main():
|
| 45 |
+
global commands, wait_cnt, eppt, end_ckpt, only_rank
|
| 46 |
+
eppt = ckpt
|
| 47 |
+
math_datasets = [
|
| 48 |
+
'AIME24',
|
| 49 |
+
]
|
| 50 |
+
# qa_datasets = [
|
| 51 |
+
# 'aime', 'hotpotqa','2wiki', 'musique', "amc", "math500"
|
| 52 |
+
# ]
|
| 53 |
+
# qa_datasets = [
|
| 54 |
+
# 'simpleqa', 'hotpotqa'
|
| 55 |
+
# ]
|
| 56 |
+
qa_datasets = [
|
| 57 |
+
'eval_old_500'
|
| 58 |
+
]
|
| 59 |
+
ckpt_dir = f"{ckpt}"
|
| 60 |
+
|
| 61 |
+
all_items = os.listdir(ckpt_dir)
|
| 62 |
+
step_list = []
|
| 63 |
+
for item in all_items:
|
| 64 |
+
if item.startswith("checkpoint-"):
|
| 65 |
+
try:
|
| 66 |
+
step = int(item.split("-")[1])
|
| 67 |
+
step_list.append(step)
|
| 68 |
+
except ValueError:
|
| 69 |
+
continue
|
| 70 |
+
|
| 71 |
+
step_list.sort(reverse=True) # 从大到小排序
|
| 72 |
+
# print(f"step_list: {step_list}")
|
| 73 |
+
# # step_list = step_list[:2] # 只取最新的checkpoint
|
| 74 |
+
print(f"step_list: {step_list}")
|
| 75 |
+
step_list = step_list[:4]
|
| 76 |
+
# step_list = ['55']
|
| 77 |
+
print(f"step_list: {step_list}")
|
| 78 |
+
for step in step_list:
|
| 79 |
+
checkpoint_dir = f"{ckpt}/checkpoint-{step}"
|
| 80 |
+
export_dir = checkpoint_dir
|
| 81 |
+
wait_cnt = 0
|
| 82 |
+
print(f"Waiting for checkpoint ({checkpoint_dir}) to exist...")
|
| 83 |
+
|
| 84 |
+
while not os.path.exists(os.path.join(checkpoint_dir, "special_tokens_map.json")):
|
| 85 |
+
commands = run(commands)
|
| 86 |
+
wait_cnt += 1
|
| 87 |
+
print(f"Already waiting {datetime.timedelta(seconds=SLEEP_TIME*wait_cnt)}.")
|
| 88 |
+
if wait_cnt > (86400 // SLEEP_TIME):
|
| 89 |
+
clear()
|
| 90 |
+
raise Exception("There have been no new checkpoints for no less than a day, and the program is about to automatically exit.")
|
| 91 |
+
# python3 = "/opt/aps/workdir/miniforge3/envs/search_o1/bin/python3"
|
| 92 |
+
python3 = "/opt/aps/workdir/search_o1/bin/python3"
|
| 93 |
+
# /opt/aps/workdir/miniforge3/envs/search_o1/bin/python
|
| 94 |
+
print("The checkpoint exists. Waiting for running...")
|
| 95 |
+
for dataset in qa_datasets:
|
| 96 |
+
base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 97 |
+
dir = "/opt/aps/workdir/sunshuang/deep_search/search_o1"
|
| 98 |
+
# cache_path = f"{dir}/cache_reason_two_model/eval_reason_two_model/qwen-instruct-32B/{base_name}"
|
| 99 |
+
cache_path = f"{dir}/cache_eval_sum_all_webpage"
|
| 100 |
+
output_path = f"{dir}/output/output_eval/{base_name}"
|
| 101 |
+
log_path = f"{dir}/logs/log_eval/{base_name}"
|
| 102 |
+
os.makedirs(cache_path, exist_ok=True)
|
| 103 |
+
os.makedirs(output_path, exist_ok=True)
|
| 104 |
+
os.makedirs(log_path, exist_ok=True)
|
| 105 |
+
contents = os.listdir(output_path)
|
| 106 |
+
|
| 107 |
+
# subset_num = 500
|
| 108 |
+
# if dataset == "math500":
|
| 109 |
+
# subset_num = 500
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
if not contents:
|
| 113 |
+
command = (
|
| 114 |
+
"export https_proxy=http://127.0.0.1:7890 && "
|
| 115 |
+
"export http_proxy=http://127.0.0.1:7890 && "
|
| 116 |
+
"export all_proxy=socks5://127.0.0.1:7891 && "
|
| 117 |
+
"cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && "
|
| 118 |
+
"CUDA_VISIBLE_DEVICES={device} "
|
| 119 |
+
f"nohup {python3} -u scripts/search_o1_sum_all_webpage.py "
|
| 120 |
+
f"--dataset_name {dataset} "
|
| 121 |
+
f"--cache_dir_base {cache_path} "
|
| 122 |
+
f"--output_dir_base {output_path} "
|
| 123 |
+
"--split test "
|
| 124 |
+
"--max_search_limit 10 "
|
| 125 |
+
"--max_turn 10 "
|
| 126 |
+
"--top_k 10 "
|
| 127 |
+
"--max_doc_len 5000 "
|
| 128 |
+
f"--model_path {checkpoint_dir} "
|
| 129 |
+
"--bing_subscription_key \"cb0d28279a826d7e5cf22d71f683c77ffd4ba27d\" "
|
| 130 |
+
"--bing_endpoint \"https://google.serper.dev/search\" "
|
| 131 |
+
f"> {log_path}/inf.log 2>&1 &"
|
| 132 |
+
)
|
| 133 |
+
print(f"command: {command}")
|
| 134 |
+
commands.put(command)
|
| 135 |
+
else:
|
| 136 |
+
print(f"skip evaluated model: {base_name}")
|
| 137 |
+
|
| 138 |
+
# continue
|
| 139 |
+
|
| 140 |
+
# for dataset in math_datasets:
|
| 141 |
+
# dir = "/opt/aps/workdir/math/evaluation"
|
| 142 |
+
# base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 143 |
+
# output_path = f"{dir}/outputs_maxtoken_20000/{base_name}"
|
| 144 |
+
# log_path = f"{dir}/logs_maxtoken_20000/{base_name}"
|
| 145 |
+
# os.makedirs(output_path, exist_ok=True)
|
| 146 |
+
# os.makedirs(log_path, exist_ok=True)
|
| 147 |
+
# contents = os.listdir(output_path)
|
| 148 |
+
# if not contents:
|
| 149 |
+
# command = (
|
| 150 |
+
# "cd /opt/aps/workdir/math/evaluation && "
|
| 151 |
+
# "CUDA_VISIBLE_DEVICES={device} "
|
| 152 |
+
# f"nohup {python3} -u run_eval_32b.py "
|
| 153 |
+
# f"--data_name {dataset} "
|
| 154 |
+
# f"--target_path {output_path} "
|
| 155 |
+
# f"--model_name_or_path {checkpoint_dir} "
|
| 156 |
+
# "--prompt v4 "
|
| 157 |
+
# "--max_tokens 20000 "
|
| 158 |
+
# "--paralle_size 2 "
|
| 159 |
+
# f"> {log_path}/inf.log 2>&1 &"
|
| 160 |
+
# )
|
| 161 |
+
# commands.put(command)
|
| 162 |
+
# else:
|
| 163 |
+
# print(f"skip evaluated model: {base_name}")
|
| 164 |
+
|
| 165 |
+
def clear():
|
| 166 |
+
print("All checkpoints exist. Wait for runing...")
|
| 167 |
+
global commands
|
| 168 |
+
while not commands.empty():
|
| 169 |
+
# sleep(SLEEP_TIME)
|
| 170 |
+
commands = run(commands)
|
| 171 |
+
print("Wish me good luck!")
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
if __name__ == '__main__':
|
| 175 |
+
# print(sys.argv[1])
|
| 176 |
+
ckpt = sys.argv[1]
|
| 177 |
+
main()
|
| 178 |
+
clear()
|
deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from time import sleep
|
| 3 |
+
import datetime
|
| 4 |
+
import subprocess
|
| 5 |
+
import json
|
| 6 |
+
from queue import Queue
|
| 7 |
+
import GPUtil
|
| 8 |
+
import sys
|
| 9 |
+
import random
|
| 10 |
+
|
| 11 |
+
wait_cnt = 0
|
| 12 |
+
SLEEP_TIME = 120
|
| 13 |
+
NEED_REVERSE = False
|
| 14 |
+
eppt = None
|
| 15 |
+
NEED_ATLAS = False
|
| 16 |
+
only_rank = False
|
| 17 |
+
commands = Queue()
|
| 18 |
+
|
| 19 |
+
# print(f"wait 120s")
|
| 20 |
+
# sleep(120)
|
| 21 |
+
|
| 22 |
+
def get_free_gpu(threshold=0.95):
|
| 23 |
+
# 获取所有可用的GPU设备
|
| 24 |
+
gpus = GPUtil.getGPUs()
|
| 25 |
+
# 筛选出显存空闲率高于阈值的GPU
|
| 26 |
+
available_gpus = [gpu.id for gpu in gpus if gpu.memoryFree > 80000 ]
|
| 27 |
+
print(f"available_gpus: {available_gpus}")
|
| 28 |
+
return available_gpus
|
| 29 |
+
|
| 30 |
+
def run(commands):
|
| 31 |
+
gpus = get_free_gpu()
|
| 32 |
+
if NEED_REVERSE:
|
| 33 |
+
gpus.reverse()
|
| 34 |
+
# random.shuffle(gpus)
|
| 35 |
+
# print(f"available_gpus: {gpus}")
|
| 36 |
+
while not commands.empty() and len(gpus) >= 2:
|
| 37 |
+
command = commands.get()
|
| 38 |
+
print(f"The following command is about to run:\n{command.format(device=f'{gpus[0]},{gpus[1]}')}")
|
| 39 |
+
subprocess.Popen(command.format(device=f'{gpus[0]},{gpus[1]}'), shell=True, start_new_session=True)
|
| 40 |
+
gpus = gpus[2:]
|
| 41 |
+
sleep(SLEEP_TIME)
|
| 42 |
+
return commands
|
| 43 |
+
|
| 44 |
+
def main():
|
| 45 |
+
global commands, wait_cnt, eppt, end_ckpt, only_rank
|
| 46 |
+
eppt = ckpt
|
| 47 |
+
math_datasets = [
|
| 48 |
+
'AIME24',
|
| 49 |
+
]
|
| 50 |
+
# qa_datasets = [
|
| 51 |
+
# 'aime', 'hotpotqa','2wiki', 'musique', "amc", "math500"
|
| 52 |
+
# ]
|
| 53 |
+
# qa_datasets = [
|
| 54 |
+
# 'simpleqa', 'hotpotqa'
|
| 55 |
+
# ]
|
| 56 |
+
qa_datasets = [
|
| 57 |
+
'eval', 'gaia'
|
| 58 |
+
]
|
| 59 |
+
ckpt_dir = f"{ckpt}"
|
| 60 |
+
|
| 61 |
+
all_items = os.listdir(ckpt_dir)
|
| 62 |
+
step_list = []
|
| 63 |
+
for item in all_items:
|
| 64 |
+
if item.startswith("checkpoint-"):
|
| 65 |
+
try:
|
| 66 |
+
step = int(item.split("-")[1])
|
| 67 |
+
step_list.append(step)
|
| 68 |
+
except ValueError:
|
| 69 |
+
continue
|
| 70 |
+
|
| 71 |
+
step_list.sort(reverse=True) # 从大到小排序
|
| 72 |
+
# print(f"step_list: {step_list}")
|
| 73 |
+
# # step_list = step_list[:2] # 只取最新的checkpoint
|
| 74 |
+
print(f"step_list: {step_list}")
|
| 75 |
+
# step_list = step_list[:4]
|
| 76 |
+
# step_list = ['55']
|
| 77 |
+
print(f"step_list: {step_list}")
|
| 78 |
+
for step in step_list:
|
| 79 |
+
checkpoint_dir = f"{ckpt}/checkpoint-{step}"
|
| 80 |
+
export_dir = checkpoint_dir
|
| 81 |
+
wait_cnt = 0
|
| 82 |
+
print(f"Waiting for checkpoint ({checkpoint_dir}) to exist...")
|
| 83 |
+
|
| 84 |
+
while not os.path.exists(os.path.join(checkpoint_dir, "special_tokens_map.json")):
|
| 85 |
+
commands = run(commands)
|
| 86 |
+
wait_cnt += 1
|
| 87 |
+
print(f"Already waiting {datetime.timedelta(seconds=SLEEP_TIME*wait_cnt)}.")
|
| 88 |
+
if wait_cnt > (86400 // SLEEP_TIME):
|
| 89 |
+
clear()
|
| 90 |
+
raise Exception("There have been no new checkpoints for no less than a day, and the program is about to automatically exit.")
|
| 91 |
+
# python3 = "/opt/aps/workdir/miniforge3/envs/search_o1/bin/python3"
|
| 92 |
+
python3 = "/opt/aps/workdir/search_o1/bin/python3"
|
| 93 |
+
# /opt/aps/workdir/miniforge3/envs/search_o1/bin/python
|
| 94 |
+
print("The checkpoint exists. Waiting for running...")
|
| 95 |
+
for dataset in qa_datasets:
|
| 96 |
+
base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 97 |
+
dir = "/opt/aps/workdir/sunshuang/deep_search/search_o1"
|
| 98 |
+
# cache_path = f"{dir}/cache_reason_two_model/eval_reason_two_model/qwen-instruct-32B/{base_name}"
|
| 99 |
+
cache_path = f"{dir}/cache_eval_sum_all_webpage_1w"
|
| 100 |
+
output_path = f"{dir}/output/output_eval/{base_name}"
|
| 101 |
+
log_path = f"{dir}/logs/log_eval/{base_name}"
|
| 102 |
+
os.makedirs(cache_path, exist_ok=True)
|
| 103 |
+
os.makedirs(output_path, exist_ok=True)
|
| 104 |
+
os.makedirs(log_path, exist_ok=True)
|
| 105 |
+
contents = os.listdir(output_path)
|
| 106 |
+
|
| 107 |
+
# subset_num = 500
|
| 108 |
+
# if dataset == "math500":
|
| 109 |
+
# subset_num = 500
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
if not contents:
|
| 113 |
+
command = (
|
| 114 |
+
"export https_proxy=http://127.0.0.1:7890 && "
|
| 115 |
+
"export http_proxy=http://127.0.0.1:7890 && "
|
| 116 |
+
"export all_proxy=socks5://127.0.0.1:7891 && "
|
| 117 |
+
"cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && "
|
| 118 |
+
"CUDA_VISIBLE_DEVICES={device} "
|
| 119 |
+
f"nohup {python3} -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py "
|
| 120 |
+
f"--dataset_name {dataset} "
|
| 121 |
+
f"--cache_dir_base {cache_path} "
|
| 122 |
+
f"--output_dir_base {output_path} "
|
| 123 |
+
"--split test "
|
| 124 |
+
"--max_search_limit 10 "
|
| 125 |
+
"--max_turn 10 "
|
| 126 |
+
"--top_k 10 "
|
| 127 |
+
"--max_doc_len 5000 "
|
| 128 |
+
f"--model_path {checkpoint_dir} "
|
| 129 |
+
"--bing_subscription_key \"cb0d28279a826d7e5cf22d71f683c77ffd4ba27d\" "
|
| 130 |
+
"--bing_endpoint \"https://google.serper.dev/search\" "
|
| 131 |
+
f"> {log_path}/inf.log 2>&1 &"
|
| 132 |
+
)
|
| 133 |
+
print(f"command: {command}")
|
| 134 |
+
commands.put(command)
|
| 135 |
+
else:
|
| 136 |
+
print(f"skip evaluated model: {base_name}")
|
| 137 |
+
|
| 138 |
+
# continue
|
| 139 |
+
|
| 140 |
+
# for dataset in math_datasets:
|
| 141 |
+
# dir = "/opt/aps/workdir/math/evaluation"
|
| 142 |
+
# base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 143 |
+
# output_path = f"{dir}/outputs_maxtoken_20000/{base_name}"
|
| 144 |
+
# log_path = f"{dir}/logs_maxtoken_20000/{base_name}"
|
| 145 |
+
# os.makedirs(output_path, exist_ok=True)
|
| 146 |
+
# os.makedirs(log_path, exist_ok=True)
|
| 147 |
+
# contents = os.listdir(output_path)
|
| 148 |
+
# if not contents:
|
| 149 |
+
# command = (
|
| 150 |
+
# "cd /opt/aps/workdir/math/evaluation && "
|
| 151 |
+
# "CUDA_VISIBLE_DEVICES={device} "
|
| 152 |
+
# f"nohup {python3} -u run_eval_32b.py "
|
| 153 |
+
# f"--data_name {dataset} "
|
| 154 |
+
# f"--target_path {output_path} "
|
| 155 |
+
# f"--model_name_or_path {checkpoint_dir} "
|
| 156 |
+
# "--prompt v4 "
|
| 157 |
+
# "--max_tokens 20000 "
|
| 158 |
+
# "--paralle_size 2 "
|
| 159 |
+
# f"> {log_path}/inf.log 2>&1 &"
|
| 160 |
+
# )
|
| 161 |
+
# commands.put(command)
|
| 162 |
+
# else:
|
| 163 |
+
# print(f"skip evaluated model: {base_name}")
|
| 164 |
+
|
| 165 |
+
def clear():
|
| 166 |
+
print("All checkpoints exist. Wait for runing...")
|
| 167 |
+
global commands
|
| 168 |
+
while not commands.empty():
|
| 169 |
+
# sleep(SLEEP_TIME)
|
| 170 |
+
commands = run(commands)
|
| 171 |
+
print("Wish me good luck!")
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
if __name__ == '__main__':
|
| 175 |
+
# print(sys.argv[1])
|
| 176 |
+
ckpt = sys.argv[1]
|
| 177 |
+
main()
|
| 178 |
+
clear()
|
deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_dpsk.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from time import sleep
|
| 3 |
+
import datetime
|
| 4 |
+
import subprocess
|
| 5 |
+
import json
|
| 6 |
+
from queue import Queue
|
| 7 |
+
import GPUtil
|
| 8 |
+
import sys
|
| 9 |
+
import random
|
| 10 |
+
|
| 11 |
+
wait_cnt = 0
|
| 12 |
+
SLEEP_TIME = 120
|
| 13 |
+
NEED_REVERSE = False
|
| 14 |
+
eppt = None
|
| 15 |
+
NEED_ATLAS = False
|
| 16 |
+
only_rank = False
|
| 17 |
+
commands = Queue()
|
| 18 |
+
|
| 19 |
+
# print(f"wait 120s")
|
| 20 |
+
# sleep(120)
|
| 21 |
+
|
| 22 |
+
def get_free_gpu(threshold=0.95):
|
| 23 |
+
# 获取所有可用的GPU设备
|
| 24 |
+
gpus = GPUtil.getGPUs()
|
| 25 |
+
# 筛选出显存空闲率高于阈值的GPU
|
| 26 |
+
available_gpus = [gpu.id for gpu in gpus if gpu.memoryFree > 80000 ]
|
| 27 |
+
print(f"available_gpus: {available_gpus}")
|
| 28 |
+
return available_gpus
|
| 29 |
+
|
| 30 |
+
def run(commands):
|
| 31 |
+
gpus = get_free_gpu()
|
| 32 |
+
if NEED_REVERSE:
|
| 33 |
+
gpus.reverse()
|
| 34 |
+
# random.shuffle(gpus)
|
| 35 |
+
# print(f"available_gpus: {gpus}")
|
| 36 |
+
while not commands.empty() and len(gpus) >= 2:
|
| 37 |
+
command = commands.get()
|
| 38 |
+
print(f"The following command is about to run:\n{command.format(device=f'{gpus[0]},{gpus[1]}')}")
|
| 39 |
+
subprocess.Popen(command.format(device=f'{gpus[0]},{gpus[1]}'), shell=True, start_new_session=True)
|
| 40 |
+
gpus = gpus[2:]
|
| 41 |
+
sleep(SLEEP_TIME)
|
| 42 |
+
return commands
|
| 43 |
+
|
| 44 |
+
def main():
|
| 45 |
+
global commands, wait_cnt, eppt, end_ckpt, only_rank
|
| 46 |
+
eppt = ckpt
|
| 47 |
+
math_datasets = [
|
| 48 |
+
'AIME24',
|
| 49 |
+
]
|
| 50 |
+
# qa_datasets = [
|
| 51 |
+
# 'aime', 'hotpotqa','2wiki', 'musique', "amc", "math500"
|
| 52 |
+
# ]
|
| 53 |
+
# qa_datasets = [
|
| 54 |
+
# 'simpleqa', 'hotpotqa'
|
| 55 |
+
# ]
|
| 56 |
+
qa_datasets = [
|
| 57 |
+
'eval', 'gaia', 'aime'
|
| 58 |
+
]
|
| 59 |
+
ckpt_dir = f"{ckpt}"
|
| 60 |
+
|
| 61 |
+
all_items = os.listdir(ckpt_dir)
|
| 62 |
+
step_list = []
|
| 63 |
+
for item in all_items:
|
| 64 |
+
if item.startswith("checkpoint-"):
|
| 65 |
+
try:
|
| 66 |
+
step = int(item.split("-")[1])
|
| 67 |
+
step_list.append(step)
|
| 68 |
+
except ValueError:
|
| 69 |
+
continue
|
| 70 |
+
|
| 71 |
+
step_list.sort(reverse=True) # 从大到小排序
|
| 72 |
+
# print(f"step_list: {step_list}")
|
| 73 |
+
# # step_list = step_list[:2] # 只取最新的checkpoint
|
| 74 |
+
print(f"step_list: {step_list}")
|
| 75 |
+
# step_list = step_list[:4]
|
| 76 |
+
# step_list = ['55']
|
| 77 |
+
print(f"step_list: {step_list}")
|
| 78 |
+
for step in step_list:
|
| 79 |
+
checkpoint_dir = f"{ckpt}/checkpoint-{step}"
|
| 80 |
+
export_dir = checkpoint_dir
|
| 81 |
+
wait_cnt = 0
|
| 82 |
+
print(f"Waiting for checkpoint ({checkpoint_dir}) to exist...")
|
| 83 |
+
|
| 84 |
+
while not os.path.exists(os.path.join(checkpoint_dir, "special_tokens_map.json")):
|
| 85 |
+
commands = run(commands)
|
| 86 |
+
wait_cnt += 1
|
| 87 |
+
print(f"Already waiting {datetime.timedelta(seconds=SLEEP_TIME*wait_cnt)}.")
|
| 88 |
+
if wait_cnt > (86400 // SLEEP_TIME):
|
| 89 |
+
clear()
|
| 90 |
+
raise Exception("There have been no new checkpoints for no less than a day, and the program is about to automatically exit.")
|
| 91 |
+
# python3 = "/opt/aps/workdir/miniforge3/envs/search_o1/bin/python3"
|
| 92 |
+
python3 = "/opt/aps/workdir/search_o1/bin/python3"
|
| 93 |
+
# /opt/aps/workdir/miniforge3/envs/search_o1/bin/python
|
| 94 |
+
print("The checkpoint exists. Waiting for running...")
|
| 95 |
+
for dataset in qa_datasets:
|
| 96 |
+
base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 97 |
+
dir = "/opt/aps/workdir/sunshuang/deep_search/search_o1"
|
| 98 |
+
# cache_path = f"{dir}/cache_reason_two_model/eval_reason_two_model/qwen-instruct-32B/{base_name}"
|
| 99 |
+
cache_path = f"{dir}/cache_eval_sum_all_webpage_1w"
|
| 100 |
+
output_path = f"{dir}/output/output_eval/{base_name}"
|
| 101 |
+
log_path = f"{dir}/logs/log_eval/{base_name}"
|
| 102 |
+
os.makedirs(cache_path, exist_ok=True)
|
| 103 |
+
os.makedirs(output_path, exist_ok=True)
|
| 104 |
+
os.makedirs(log_path, exist_ok=True)
|
| 105 |
+
contents = os.listdir(output_path)
|
| 106 |
+
|
| 107 |
+
# subset_num = 500
|
| 108 |
+
# if dataset == "math500":
|
| 109 |
+
# subset_num = 500
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
if not contents:
|
| 113 |
+
command = (
|
| 114 |
+
"export https_proxy=http://127.0.0.1:7890 && "
|
| 115 |
+
"export http_proxy=http://127.0.0.1:7890 && "
|
| 116 |
+
"export all_proxy=socks5://127.0.0.1:7891 && "
|
| 117 |
+
"cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && "
|
| 118 |
+
"CUDA_VISIBLE_DEVICES={device} "
|
| 119 |
+
f"nohup {python3} -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_dpsk.py "
|
| 120 |
+
f"--dataset_name {dataset} "
|
| 121 |
+
f"--cache_dir_base {cache_path} "
|
| 122 |
+
f"--output_dir_base {output_path} "
|
| 123 |
+
"--split test "
|
| 124 |
+
"--max_search_limit 10 "
|
| 125 |
+
"--max_turn 10 "
|
| 126 |
+
"--top_k 10 "
|
| 127 |
+
"--max_doc_len 5000 "
|
| 128 |
+
f"--model_path {checkpoint_dir} "
|
| 129 |
+
"--bing_subscription_key \"cb0d28279a826d7e5cf22d71f683c77ffd4ba27d\" "
|
| 130 |
+
"--bing_endpoint \"https://google.serper.dev/search\" "
|
| 131 |
+
f"> {log_path}/inf.log 2>&1 &"
|
| 132 |
+
)
|
| 133 |
+
print(f"command: {command}")
|
| 134 |
+
commands.put(command)
|
| 135 |
+
else:
|
| 136 |
+
print(f"skip evaluated model: {base_name}")
|
| 137 |
+
|
| 138 |
+
# continue
|
| 139 |
+
|
| 140 |
+
# for dataset in math_datasets:
|
| 141 |
+
# dir = "/opt/aps/workdir/math/evaluation"
|
| 142 |
+
# base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 143 |
+
# output_path = f"{dir}/outputs_maxtoken_20000/{base_name}"
|
| 144 |
+
# log_path = f"{dir}/logs_maxtoken_20000/{base_name}"
|
| 145 |
+
# os.makedirs(output_path, exist_ok=True)
|
| 146 |
+
# os.makedirs(log_path, exist_ok=True)
|
| 147 |
+
# contents = os.listdir(output_path)
|
| 148 |
+
# if not contents:
|
| 149 |
+
# command = (
|
| 150 |
+
# "cd /opt/aps/workdir/math/evaluation && "
|
| 151 |
+
# "CUDA_VISIBLE_DEVICES={device} "
|
| 152 |
+
# f"nohup {python3} -u run_eval_32b.py "
|
| 153 |
+
# f"--data_name {dataset} "
|
| 154 |
+
# f"--target_path {output_path} "
|
| 155 |
+
# f"--model_name_or_path {checkpoint_dir} "
|
| 156 |
+
# "--prompt v4 "
|
| 157 |
+
# "--max_tokens 20000 "
|
| 158 |
+
# "--paralle_size 2 "
|
| 159 |
+
# f"> {log_path}/inf.log 2>&1 &"
|
| 160 |
+
# )
|
| 161 |
+
# commands.put(command)
|
| 162 |
+
# else:
|
| 163 |
+
# print(f"skip evaluated model: {base_name}")
|
| 164 |
+
|
| 165 |
+
def clear():
|
| 166 |
+
print("All checkpoints exist. Wait for runing...")
|
| 167 |
+
global commands
|
| 168 |
+
while not commands.empty():
|
| 169 |
+
# sleep(SLEEP_TIME)
|
| 170 |
+
commands = run(commands)
|
| 171 |
+
print("Wish me good luck!")
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
if __name__ == '__main__':
|
| 175 |
+
# print(sys.argv[1])
|
| 176 |
+
ckpt = sys.argv[1]
|
| 177 |
+
main()
|
| 178 |
+
clear()
|
deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model_qwq.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from time import sleep
|
| 3 |
+
import datetime
|
| 4 |
+
import subprocess
|
| 5 |
+
import json
|
| 6 |
+
from queue import Queue
|
| 7 |
+
import GPUtil
|
| 8 |
+
import sys
|
| 9 |
+
import random
|
| 10 |
+
|
| 11 |
+
wait_cnt = 0
|
| 12 |
+
SLEEP_TIME = 120
|
| 13 |
+
NEED_REVERSE = False
|
| 14 |
+
eppt = None
|
| 15 |
+
NEED_ATLAS = False
|
| 16 |
+
only_rank = False
|
| 17 |
+
commands = Queue()
|
| 18 |
+
|
| 19 |
+
# print(f"wait 120s")
|
| 20 |
+
# sleep(120)
|
| 21 |
+
|
| 22 |
+
def get_free_gpu(threshold=0.95):
|
| 23 |
+
# 获取所有可用的GPU设备
|
| 24 |
+
gpus = GPUtil.getGPUs()
|
| 25 |
+
# 筛选出显存空闲率高于阈值的GPU
|
| 26 |
+
available_gpus = [gpu.id for gpu in gpus if gpu.memoryFree > 80000 ]
|
| 27 |
+
print(f"available_gpus: {available_gpus}")
|
| 28 |
+
return available_gpus
|
| 29 |
+
|
| 30 |
+
def run(commands):
|
| 31 |
+
gpus = get_free_gpu()
|
| 32 |
+
if NEED_REVERSE:
|
| 33 |
+
gpus.reverse()
|
| 34 |
+
# random.shuffle(gpus)
|
| 35 |
+
# print(f"available_gpus: {gpus}")
|
| 36 |
+
while not commands.empty() and len(gpus) >= 2:
|
| 37 |
+
command = commands.get()
|
| 38 |
+
print(f"The following command is about to run:\n{command.format(device=f'{gpus[0]},{gpus[1]}')}")
|
| 39 |
+
subprocess.Popen(command.format(device=f'{gpus[0]},{gpus[1]}'), shell=True, start_new_session=True)
|
| 40 |
+
gpus = gpus[2:]
|
| 41 |
+
sleep(SLEEP_TIME)
|
| 42 |
+
return commands
|
| 43 |
+
|
| 44 |
+
def main():
|
| 45 |
+
global commands, wait_cnt, eppt, end_ckpt, only_rank
|
| 46 |
+
eppt = ckpt
|
| 47 |
+
math_datasets = [
|
| 48 |
+
'AIME24',
|
| 49 |
+
]
|
| 50 |
+
# qa_datasets = [
|
| 51 |
+
# 'aime', 'hotpotqa','2wiki', 'musique', "amc", "math500"
|
| 52 |
+
# ]
|
| 53 |
+
# qa_datasets = [
|
| 54 |
+
# 'simpleqa', 'hotpotqa'
|
| 55 |
+
# ]
|
| 56 |
+
qa_datasets = [
|
| 57 |
+
'gaia', 'bamboogle'
|
| 58 |
+
]
|
| 59 |
+
ckpt_dir = f"{ckpt}"
|
| 60 |
+
|
| 61 |
+
all_items = os.listdir(ckpt_dir)
|
| 62 |
+
step_list = []
|
| 63 |
+
for item in all_items:
|
| 64 |
+
if item.startswith("checkpoint-"):
|
| 65 |
+
try:
|
| 66 |
+
step = int(item.split("-")[1])
|
| 67 |
+
step_list.append(step)
|
| 68 |
+
except ValueError:
|
| 69 |
+
continue
|
| 70 |
+
|
| 71 |
+
step_list.sort(reverse=True) # 从大到小排序
|
| 72 |
+
# print(f"step_list: {step_list}")
|
| 73 |
+
# # step_list = step_list[:2] # 只取最新的checkpoint
|
| 74 |
+
print(f"step_list: {step_list}")
|
| 75 |
+
# step_list = step_list[:3]
|
| 76 |
+
# step_list = ['55']
|
| 77 |
+
# print(f"step_list: {step_list}")
|
| 78 |
+
for step in step_list:
|
| 79 |
+
checkpoint_dir = f"{ckpt}/checkpoint-{step}"
|
| 80 |
+
export_dir = checkpoint_dir
|
| 81 |
+
wait_cnt = 0
|
| 82 |
+
print(f"Waiting for checkpoint ({checkpoint_dir}) to exist...")
|
| 83 |
+
|
| 84 |
+
while not os.path.exists(os.path.join(checkpoint_dir, "special_tokens_map.json")):
|
| 85 |
+
commands = run(commands)
|
| 86 |
+
wait_cnt += 1
|
| 87 |
+
print(f"Already waiting {datetime.timedelta(seconds=SLEEP_TIME*wait_cnt)}.")
|
| 88 |
+
if wait_cnt > (86400 // SLEEP_TIME):
|
| 89 |
+
clear()
|
| 90 |
+
raise Exception("There have been no new checkpoints for no less than a day, and the program is about to automatically exit.")
|
| 91 |
+
# python3 = "/opt/aps/workdir/miniforge3/envs/search_o1/bin/python3"
|
| 92 |
+
python3 = "/opt/aps/workdir/search_o1/bin/python3"
|
| 93 |
+
# /opt/aps/workdir/miniforge3/envs/search_o1/bin/python
|
| 94 |
+
print("The checkpoint exists. Waiting for running...")
|
| 95 |
+
for dataset in qa_datasets:
|
| 96 |
+
base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 97 |
+
dir = "/opt/aps/workdir/sunshuang/deep_search/search_o1"
|
| 98 |
+
# cache_path = f"{dir}/cache_reason_two_model/eval_reason_two_model/qwen-instruct-32B/{base_name}"
|
| 99 |
+
cache_path = f"{dir}/cache_eval_sum_all_webpage_1w"
|
| 100 |
+
output_path = f"{dir}/output/output_eval/{base_name}"
|
| 101 |
+
log_path = f"{dir}/logs/log_eval/{base_name}"
|
| 102 |
+
os.makedirs(cache_path, exist_ok=True)
|
| 103 |
+
os.makedirs(output_path, exist_ok=True)
|
| 104 |
+
os.makedirs(log_path, exist_ok=True)
|
| 105 |
+
contents = os.listdir(output_path)
|
| 106 |
+
|
| 107 |
+
# subset_num = 500
|
| 108 |
+
# if dataset == "math500":
|
| 109 |
+
# subset_num = 500
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
if not contents:
|
| 113 |
+
command = (
|
| 114 |
+
"export https_proxy=http://127.0.0.1:7890 && "
|
| 115 |
+
"export http_proxy=http://127.0.0.1:7890 && "
|
| 116 |
+
"export all_proxy=socks5://127.0.0.1:7891 && "
|
| 117 |
+
"cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && "
|
| 118 |
+
"CUDA_VISIBLE_DEVICES={device} "
|
| 119 |
+
f"nohup {python3} -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py "
|
| 120 |
+
f"--dataset_name {dataset} "
|
| 121 |
+
f"--cache_dir_base {cache_path} "
|
| 122 |
+
f"--output_dir_base {output_path} "
|
| 123 |
+
"--split test "
|
| 124 |
+
"--max_search_limit 10 "
|
| 125 |
+
"--max_turn 10 "
|
| 126 |
+
"--top_k 10 "
|
| 127 |
+
"--max_doc_len 5000 "
|
| 128 |
+
f"--model_path {checkpoint_dir} "
|
| 129 |
+
"--bing_subscription_key \"cb0d28279a826d7e5cf22d71f683c77ffd4ba27d\" "
|
| 130 |
+
"--bing_endpoint \"https://google.serper.dev/search\" "
|
| 131 |
+
f"> {log_path}/inf.log 2>&1 &"
|
| 132 |
+
)
|
| 133 |
+
print(f"command: {command}")
|
| 134 |
+
commands.put(command)
|
| 135 |
+
else:
|
| 136 |
+
print(f"skip evaluated model: {base_name}")
|
| 137 |
+
|
| 138 |
+
# continue
|
| 139 |
+
|
| 140 |
+
# for dataset in math_datasets:
|
| 141 |
+
# dir = "/opt/aps/workdir/math/evaluation"
|
| 142 |
+
# base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 143 |
+
# output_path = f"{dir}/outputs_maxtoken_20000/{base_name}"
|
| 144 |
+
# log_path = f"{dir}/logs_maxtoken_20000/{base_name}"
|
| 145 |
+
# os.makedirs(output_path, exist_ok=True)
|
| 146 |
+
# os.makedirs(log_path, exist_ok=True)
|
| 147 |
+
# contents = os.listdir(output_path)
|
| 148 |
+
# if not contents:
|
| 149 |
+
# command = (
|
| 150 |
+
# "cd /opt/aps/workdir/math/evaluation && "
|
| 151 |
+
# "CUDA_VISIBLE_DEVICES={device} "
|
| 152 |
+
# f"nohup {python3} -u run_eval_32b.py "
|
| 153 |
+
# f"--data_name {dataset} "
|
| 154 |
+
# f"--target_path {output_path} "
|
| 155 |
+
# f"--model_name_or_path {checkpoint_dir} "
|
| 156 |
+
# "--prompt v4 "
|
| 157 |
+
# "--max_tokens 20000 "
|
| 158 |
+
# "--paralle_size 2 "
|
| 159 |
+
# f"> {log_path}/inf.log 2>&1 &"
|
| 160 |
+
# )
|
| 161 |
+
# commands.put(command)
|
| 162 |
+
# else:
|
| 163 |
+
# print(f"skip evaluated model: {base_name}")
|
| 164 |
+
|
| 165 |
+
def clear():
|
| 166 |
+
print("All checkpoints exist. Wait for runing...")
|
| 167 |
+
global commands
|
| 168 |
+
while not commands.empty():
|
| 169 |
+
# sleep(SLEEP_TIME)
|
| 170 |
+
commands = run(commands)
|
| 171 |
+
print("Wish me good luck!")
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
if __name__ == '__main__':
|
| 175 |
+
# print(sys.argv[1])
|
| 176 |
+
ckpt = sys.argv[1]
|
| 177 |
+
main()
|
| 178 |
+
clear()
|
deep_search/sft/wait_eval_use_one_model_for_ckpt_worker0.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from time import sleep
|
| 3 |
+
import datetime
|
| 4 |
+
import subprocess
|
| 5 |
+
import json
|
| 6 |
+
from queue import Queue
|
| 7 |
+
import GPUtil
|
| 8 |
+
import sys
|
| 9 |
+
import random
|
| 10 |
+
|
| 11 |
+
wait_cnt = 0
|
| 12 |
+
SLEEP_TIME = 120
|
| 13 |
+
NEED_REVERSE = False
|
| 14 |
+
eppt = None
|
| 15 |
+
NEED_ATLAS = False
|
| 16 |
+
only_rank = False
|
| 17 |
+
commands = Queue()
|
| 18 |
+
|
| 19 |
+
# print(f"wait 120s")
|
| 20 |
+
# sleep(120)
|
| 21 |
+
|
| 22 |
+
def get_free_gpu(threshold=0.95):
|
| 23 |
+
# 获取所有可用的GPU设备
|
| 24 |
+
gpus = GPUtil.getGPUs()
|
| 25 |
+
# 筛选出显存空闲率高于阈值的GPU
|
| 26 |
+
available_gpus = [gpu.id for gpu in gpus if gpu.memoryFree > 80000 and gpu.id not in [4,5]]
|
| 27 |
+
print(f"available_gpus: {available_gpus}")
|
| 28 |
+
return available_gpus
|
| 29 |
+
|
| 30 |
+
def run(commands):
|
| 31 |
+
gpus = get_free_gpu()
|
| 32 |
+
if NEED_REVERSE:
|
| 33 |
+
gpus.reverse()
|
| 34 |
+
# random.shuffle(gpus)
|
| 35 |
+
# print(f"available_gpus: {gpus}")
|
| 36 |
+
while not commands.empty() and len(gpus) >= 2:
|
| 37 |
+
command = commands.get()
|
| 38 |
+
print(f"The following command is about to run:\n{command.format(device=f'{gpus[0]},{gpus[1]}')}")
|
| 39 |
+
subprocess.Popen(command.format(device=f'{gpus[0]},{gpus[1]}'), shell=True, start_new_session=True)
|
| 40 |
+
gpus = gpus[2:]
|
| 41 |
+
sleep(SLEEP_TIME)
|
| 42 |
+
return commands
|
| 43 |
+
|
| 44 |
+
def main():
|
| 45 |
+
global commands, wait_cnt, eppt, end_ckpt, only_rank
|
| 46 |
+
eppt = ckpt
|
| 47 |
+
math_datasets = [
|
| 48 |
+
'AIME24',
|
| 49 |
+
]
|
| 50 |
+
# qa_datasets = [
|
| 51 |
+
# 'aime', 'hotpotqa','2wiki', 'musique', "amc", "math500"
|
| 52 |
+
# ]
|
| 53 |
+
# qa_datasets = [
|
| 54 |
+
# 'simpleqa', 'hotpotqa'
|
| 55 |
+
# ]
|
| 56 |
+
# qa_datasets = [
|
| 57 |
+
# 'eval', "realqa"
|
| 58 |
+
# ]
|
| 59 |
+
qa_datasets = [
|
| 60 |
+
'gaia'
|
| 61 |
+
]
|
| 62 |
+
ckpt_dir = f"{ckpt}"
|
| 63 |
+
|
| 64 |
+
all_items = os.listdir(ckpt_dir)
|
| 65 |
+
step_list = []
|
| 66 |
+
for item in all_items:
|
| 67 |
+
if item.startswith("checkpoint-"):
|
| 68 |
+
try:
|
| 69 |
+
step = int(item.split("-")[1])
|
| 70 |
+
step_list.append(step)
|
| 71 |
+
except ValueError:
|
| 72 |
+
continue
|
| 73 |
+
|
| 74 |
+
step_list.sort(reverse=True) # 从大到小排序
|
| 75 |
+
# print(f"step_list: {step_list}")
|
| 76 |
+
# # step_list = step_list[:2] # 只取最新的checkpoint
|
| 77 |
+
print(f"step_list: {step_list}")
|
| 78 |
+
step_list = step_list[:4]
|
| 79 |
+
print(f"step_list: {step_list}")
|
| 80 |
+
for step in step_list:
|
| 81 |
+
checkpoint_dir = f"{ckpt}/checkpoint-{step}"
|
| 82 |
+
export_dir = checkpoint_dir
|
| 83 |
+
wait_cnt = 0
|
| 84 |
+
print(f"Waiting for checkpoint ({checkpoint_dir}) to exist...")
|
| 85 |
+
|
| 86 |
+
while not os.path.exists(os.path.join(checkpoint_dir, "special_tokens_map.json")):
|
| 87 |
+
commands = run(commands)
|
| 88 |
+
wait_cnt += 1
|
| 89 |
+
print(f"Already waiting {datetime.timedelta(seconds=SLEEP_TIME*wait_cnt)}.")
|
| 90 |
+
if wait_cnt > (86400 // SLEEP_TIME):
|
| 91 |
+
clear()
|
| 92 |
+
raise Exception("There have been no new checkpoints for no less than a day, and the program is about to automatically exit.")
|
| 93 |
+
# python3 = "/opt/aps/workdir/miniforge3/envs/search_o1/bin/python3"
|
| 94 |
+
python3 = "/opt/aps/workdir/search_o1/bin/python3"
|
| 95 |
+
# /opt/aps/workdir/miniforge3/envs/search_o1/bin/python
|
| 96 |
+
print("The checkpoint exists. Waiting for running...")
|
| 97 |
+
for dataset in qa_datasets:
|
| 98 |
+
base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 99 |
+
dir = "/opt/aps/workdir/sunshuang/deep_search/search_o1"
|
| 100 |
+
# cache_path = f"{dir}/cache_reason_two_model/eval_reason_two_model/qwen-instruct-32B/{base_name}"
|
| 101 |
+
cache_path = f"{dir}/cache_eval_sum_all_webpage"
|
| 102 |
+
output_path = f"{dir}/output/output_eval/{base_name}"
|
| 103 |
+
log_path = f"{dir}/logs/log_eval/{base_name}"
|
| 104 |
+
os.makedirs(cache_path, exist_ok=True)
|
| 105 |
+
os.makedirs(output_path, exist_ok=True)
|
| 106 |
+
os.makedirs(log_path, exist_ok=True)
|
| 107 |
+
contents = os.listdir(output_path)
|
| 108 |
+
|
| 109 |
+
# subset_num = 500
|
| 110 |
+
# if dataset == "math500":
|
| 111 |
+
# subset_num = 500
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
if not contents:
|
| 115 |
+
command = (
|
| 116 |
+
"export https_proxy=http://127.0.0.1:7890 && "
|
| 117 |
+
"export http_proxy=http://127.0.0.1:7890 && "
|
| 118 |
+
"export all_proxy=socks5://127.0.0.1:7891 && "
|
| 119 |
+
"cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && "
|
| 120 |
+
"CUDA_VISIBLE_DEVICES={device} "
|
| 121 |
+
f"nohup {python3} -u scripts/search_o1_sum_all_webpage.py "
|
| 122 |
+
f"--dataset_name {dataset} "
|
| 123 |
+
f"--cache_dir_base {cache_path} "
|
| 124 |
+
f"--output_dir_base {output_path} "
|
| 125 |
+
"--split test "
|
| 126 |
+
"--max_search_limit 10 "
|
| 127 |
+
"--max_turn 10 "
|
| 128 |
+
"--top_k 10 "
|
| 129 |
+
"--max_doc_len 5000 "
|
| 130 |
+
f"--model_path {checkpoint_dir} "
|
| 131 |
+
"--bing_subscription_key \"cb0d28279a826d7e5cf22d71f683c77ffd4ba27d\" "
|
| 132 |
+
"--bing_endpoint \"https://google.serper.dev/search\" "
|
| 133 |
+
f"> {log_path}/inf.log 2>&1 &"
|
| 134 |
+
)
|
| 135 |
+
print(f"command: {command}")
|
| 136 |
+
commands.put(command)
|
| 137 |
+
else:
|
| 138 |
+
print(f"skip evaluated model: {base_name}")
|
| 139 |
+
|
| 140 |
+
# continue
|
| 141 |
+
|
| 142 |
+
# for dataset in math_datasets:
|
| 143 |
+
# dir = "/opt/aps/workdir/math/evaluation"
|
| 144 |
+
# base_name = f"{os.path.basename(ckpt)}/{step}/{dataset}"
|
| 145 |
+
# output_path = f"{dir}/outputs_maxtoken_20000/{base_name}"
|
| 146 |
+
# log_path = f"{dir}/logs_maxtoken_20000/{base_name}"
|
| 147 |
+
# os.makedirs(output_path, exist_ok=True)
|
| 148 |
+
# os.makedirs(log_path, exist_ok=True)
|
| 149 |
+
# contents = os.listdir(output_path)
|
| 150 |
+
# if not contents:
|
| 151 |
+
# command = (
|
| 152 |
+
# "cd /opt/aps/workdir/math/evaluation && "
|
| 153 |
+
# "CUDA_VISIBLE_DEVICES={device} "
|
| 154 |
+
# f"nohup {python3} -u run_eval_32b.py "
|
| 155 |
+
# f"--data_name {dataset} "
|
| 156 |
+
# f"--target_path {output_path} "
|
| 157 |
+
# f"--model_name_or_path {checkpoint_dir} "
|
| 158 |
+
# "--prompt v4 "
|
| 159 |
+
# "--max_tokens 20000 "
|
| 160 |
+
# "--paralle_size 2 "
|
| 161 |
+
# f"> {log_path}/inf.log 2>&1 &"
|
| 162 |
+
# )
|
| 163 |
+
# commands.put(command)
|
| 164 |
+
# else:
|
| 165 |
+
# print(f"skip evaluated model: {base_name}")
|
| 166 |
+
|
| 167 |
+
def clear():
|
| 168 |
+
print("All checkpoints exist. Wait for runing...")
|
| 169 |
+
global commands
|
| 170 |
+
while not commands.empty():
|
| 171 |
+
# sleep(SLEEP_TIME)
|
| 172 |
+
commands = run(commands)
|
| 173 |
+
print("Wish me good luck!")
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
if __name__ == '__main__':
|
| 177 |
+
# print(sys.argv[1])
|
| 178 |
+
ckpt = sys.argv[1]
|
| 179 |
+
main()
|
| 180 |
+
clear()
|
ssh_node.sh
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ssh lmlabide-87fe1717-6eec-4fb7-94c7-4e7919588b88-worker-0
|
| 2 |
+
ssh lmlabide-87fe1717-6eec-4fb7-94c7-4e7919588b88-worker-1
|
| 3 |
+
ssh lmlabide-87fe1717-6eec-4fb7-94c7-4e7919588b88-worker-2
|
| 4 |
+
ssh lmlabide-87fe1717-6eec-4fb7-94c7-4e7919588b88-worker-3
|
| 5 |
+
ssh lmlabide-87fe1717-6eec-4fb7-94c7-4e7919588b88-worker-4
|
test.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# import subprocess
|
| 3 |
+
|
| 4 |
+
# def check_gpu_processes_with_nvitop(gpu_id):
|
| 5 |
+
# try:
|
| 6 |
+
# # 调用 nvitop 命令,指定 GPU ID 和单次查询模式
|
| 7 |
+
# result = subprocess.run(
|
| 8 |
+
# ["nvitop", "-o", str(gpu_id), "--once"],
|
| 9 |
+
# stdout=subprocess.PIPE,
|
| 10 |
+
# stderr=subprocess.PIPE,
|
| 11 |
+
# text=True
|
| 12 |
+
# )
|
| 13 |
+
|
| 14 |
+
# # 检查命令是否执行成功
|
| 15 |
+
# if result.returncode != 0:
|
| 16 |
+
# print(f"Error running nvitop: {result.stderr} [[5]]")
|
| 17 |
+
# return True # 如果错误就返回True,即当前GPU不可用
|
| 18 |
+
|
| 19 |
+
# # 解析输出内容
|
| 20 |
+
# has_processes = True
|
| 21 |
+
# len_res = len(result.stdout.splitlines())
|
| 22 |
+
|
| 23 |
+
# no_processes_str = "No running processes found"
|
| 24 |
+
# for line in result.stdout.splitlines():
|
| 25 |
+
# line = line.strip()
|
| 26 |
+
# if no_processes_str in line:
|
| 27 |
+
# print("No running processes found.")
|
| 28 |
+
# has_processes = False
|
| 29 |
+
|
| 30 |
+
# return has_processes
|
| 31 |
+
|
| 32 |
+
# except FileNotFoundError:
|
| 33 |
+
# print("nvitop not found. Install it via 'pip install nvitop' [[4]]")
|
| 34 |
+
# return True # 如果错误就返回True,即当前GPU不可用
|
| 35 |
+
|
| 36 |
+
# # 调用函数,指定 GPU 0
|
| 37 |
+
# print(check_gpu_processes_with_nvitop(1))
|
| 38 |
+
|
| 39 |
+
# 初始化变量 ll
|
| 40 |
+
l1 = [3, [66, 55, 44], (7, 8, 9)]
|
| 41 |
+
l2 = list(l1) # 浅复制了 ll
|
| 42 |
+
|
| 43 |
+
# 输出l1 l2每个元素的id
|
| 44 |
+
print("l1")
|
| 45 |
+
print(id(l1[0]), id(l1[1]), id(l1[2]))
|
| 46 |
+
print("l2")
|
| 47 |
+
print(id(l2[0]), id(l2[1]), id(l2[2]))
|
| 48 |
+
|
| 49 |
+
l1.append(100)
|
| 50 |
+
l1[1].remove(55)
|
| 51 |
+
print("l1")
|
| 52 |
+
print(id(l1[0]), id(l1[1]), id(l1[2]))
|
| 53 |
+
print("l2")
|
| 54 |
+
print(id(l2[0]), id(l2[1]), id(l2[2]))
|
| 55 |
+
l2[1] += [33, 22]
|
| 56 |
+
l2[2] += (10, 11)
|
| 57 |
+
print(id(l1[0]), id(l1[1]), id(l1[2]))
|
| 58 |
+
print(id(l2[0]), id(l2[1]), id(l2[2]))
|
train_requirements.txt
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
numpy==1.26.4
|
| 2 |
+
rouge-chinese==1.0.3
|
| 3 |
+
fire==0.7.0
|
| 4 |
+
transformers==4.46.0
|
| 5 |
+
torch==2.1.0
|
| 6 |
+
|
| 7 |
+
accelerate==0.26.0
|
| 8 |
+
aiofiles==23.2.1
|
| 9 |
+
aiohappyeyeballs==2.4.6
|
| 10 |
+
aiohttp==3.11.12
|
| 11 |
+
aiosignal==1.3.2
|
| 12 |
+
altair==5.5.0
|
| 13 |
+
anyio==4.8.0
|
| 14 |
+
attrs==25.1.0
|
| 15 |
+
certifi==2025.1.31
|
| 16 |
+
charset-normalizer==3.4.1
|
| 17 |
+
click==8.1.8
|
| 18 |
+
contourpy==1.3.1
|
| 19 |
+
cycler==0.12.1
|
| 20 |
+
datasets==2.15.0
|
| 21 |
+
deepspeed==0.14.5
|
| 22 |
+
dill==0.3.7
|
| 23 |
+
einops==0.8.1
|
| 24 |
+
fastapi==0.95.1
|
| 25 |
+
ffmpy==0.5.0
|
| 26 |
+
filelock==3.17.0
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
fonttools==4.56.0
|
| 30 |
+
frozenlist==1.5.0
|
| 31 |
+
fsspec==2023.10.0
|
| 32 |
+
gputil==1.4.0
|
| 33 |
+
gradio==3.38.0
|
| 34 |
+
gradio-client==1.7.1
|
| 35 |
+
h11==0.14.0
|
| 36 |
+
hjson==3.1.0
|
| 37 |
+
httpcore==1.0.7
|
| 38 |
+
httpx==0.28.1
|
| 39 |
+
huggingface-hub==0.29.1
|
| 40 |
+
idna==3.10
|
| 41 |
+
jieba==0.42.1
|
| 42 |
+
jinja2==3.1.5
|
| 43 |
+
joblib==1.4.2
|
| 44 |
+
jsonschema==4.23.0
|
| 45 |
+
jsonschema-specifications==2024.10.1
|
| 46 |
+
kiwisolver==1.4.8
|
| 47 |
+
linkify-it-py==2.0.3
|
| 48 |
+
markdown-it-py==2.2.0
|
| 49 |
+
markupsafe==2.1.5
|
| 50 |
+
matplotlib==3.10.0
|
| 51 |
+
mdit-py-plugins==0.3.3
|
| 52 |
+
mdurl==0.1.2
|
| 53 |
+
mpmath==1.3.0
|
| 54 |
+
multidict==6.1.0
|
| 55 |
+
multiprocess==0.70.15
|
| 56 |
+
narwhals==1.27.1
|
| 57 |
+
networkx==3.4.2
|
| 58 |
+
ninja==1.11.1.3
|
| 59 |
+
nltk==3.9.1
|
| 60 |
+
|
| 61 |
+
nvidia-cublas-cu12==12.1.3.1
|
| 62 |
+
nvidia-cuda-cupti-cu12==12.1.105
|
| 63 |
+
nvidia-cuda-nvrtc-cu12==12.1.105
|
| 64 |
+
nvidia-cuda-runtime-cu12==12.1.105
|
| 65 |
+
nvidia-cudnn-cu12==8.9.2.26
|
| 66 |
+
nvidia-cufft-cu12==11.0.2.54
|
| 67 |
+
nvidia-curand-cu12==10.3.2.106
|
| 68 |
+
nvidia-cusolver-cu12==11.4.5.107
|
| 69 |
+
nvidia-cusparse-cu12==12.1.0.106
|
| 70 |
+
nvidia-ml-py==12.570.86
|
| 71 |
+
nvidia-nccl-cu12==2.18.1
|
| 72 |
+
nvidia-nvjitlink-cu12==12.8.61
|
| 73 |
+
nvidia-nvtx-cu12==12.1.105
|
| 74 |
+
orjson==3.10.15
|
| 75 |
+
packaging==24.2
|
| 76 |
+
pandas==2.2.3
|
| 77 |
+
peft==0.6.1
|
| 78 |
+
pillow==10.4.0
|
| 79 |
+
propcache==0.3.0
|
| 80 |
+
protobuf==5.29.3
|
| 81 |
+
psutil==7.0.0
|
| 82 |
+
py-cpuinfo==9.0.0
|
| 83 |
+
pyarrow==19.0.1
|
| 84 |
+
pyarrow-hotfix==0.6
|
| 85 |
+
pydantic==1.10.11
|
| 86 |
+
pydub==0.25.1
|
| 87 |
+
pyparsing==3.2.1
|
| 88 |
+
python-dateutil==2.9.0.post0
|
| 89 |
+
python-multipart==0.0.20
|
| 90 |
+
pytz==2025.1
|
| 91 |
+
pyyaml==6.0.2
|
| 92 |
+
referencing==0.36.2
|
| 93 |
+
regex==2024.11.6
|
| 94 |
+
requests==2.32.3
|
| 95 |
+
|
| 96 |
+
rpds-py==0.23.1
|
| 97 |
+
safetensors==0.5.2
|
| 98 |
+
scipy==1.15.2
|
| 99 |
+
semantic-version==2.10.0
|
| 100 |
+
sentencepiece==0.2.0
|
| 101 |
+
six==1.17.0
|
| 102 |
+
sniffio==1.3.1
|
| 103 |
+
sse-starlette==2.1.3
|
| 104 |
+
starlette==0.26.1
|
| 105 |
+
sympy==1.13.3
|
| 106 |
+
termcolor==2.5.0
|
| 107 |
+
tiktoken==0.9.0
|
| 108 |
+
tokenizers==0.20.3
|
| 109 |
+
|
| 110 |
+
tqdm==4.67.1
|
| 111 |
+
|
| 112 |
+
triton==2.1.0
|
| 113 |
+
typing-extensions==4.12.2
|
| 114 |
+
tzdata==2025.1
|
| 115 |
+
uc-micro-py==1.0.3
|
| 116 |
+
urllib3==2.3.0
|
| 117 |
+
uvicorn==0.34.0
|
| 118 |
+
websockets==11.0.3
|
| 119 |
+
xxhash==3.5.0
|
| 120 |
+
yarl==1.18.3
|
| 121 |
+
|
| 122 |
+
flash-attn==2.7.1.post1
|