Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- deep_search/SimpleDeepSearcher/LICENSE +21 -0
- deep_search/SimpleDeepSearcher/README.md +209 -0
- deep_search/data_from_zhiyuan/data_for_dpo/3k_question/domain_counts.json +24 -0
- deep_search/data_from_zhiyuan/data_for_dpo/3k_question/domain_keypoints_distribution_without_remove_dup.json +0 -0
- deep_search/data_from_zhiyuan/data_for_dpo/3k_question/find_dup.py +56 -0
- deep_search/data_from_zhiyuan/data_for_dpo/3k_question/load_data.py +21 -0
- deep_search/data_from_zhiyuan/data_for_dpo/3k_question/source_counts.json +6 -0
- deep_search/data_from_zhiyuan/data_for_rl/MultiHopRAG_only_qa.json +0 -0
- deep_search/data_from_zhiyuan/data_for_rl/domain_counts.json +546 -0
- deep_search/data_from_zhiyuan/data_for_rl/jsonl_to_json.py +27 -0
- deep_search/data_from_zhiyuan/data_for_rl/merge_data.py +47 -0
- deep_search/data_from_zhiyuan/data_for_rl/musique_ans_v1.0_train_only_qa.json +0 -0
- deep_search/data_from_zhiyuan/data_for_rl/musique_ans_v1.0_train_only_qa.jsonl +0 -0
- deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/domain_counts.json +10 -0
- deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/domain_keypoints_distribution_without_remove_dup.json +0 -0
- deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/key_points_count.json +0 -0
- deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/source_counts.json +3 -0
- deep_search/data_from_zhiyuan/data_for_rl/select_source.py +35 -0
- deep_search/data_from_zhiyuan/data_for_rl/source_counts.json +6 -0
- deep_search/data_from_zhiyuan/data_for_rl/tagged_domain_keypoints/domain_counts.json +41 -0
- deep_search/data_from_zhiyuan/data_for_rl/tagged_domain_keypoints/domain_keypoints_distribution_without_remove_dup.json +0 -0
- deep_search/data_from_zhiyuan/data_for_rl/tagged_domain_keypoints/source_counts.json +6 -0
- deep_search/sft/3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B_2.log +32 -0
- deep_search/sft/3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B_3.log +30 -0
- deep_search/sft/4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B_2.log +27 -0
- deep_search/sft/4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B_3.log +41 -0
- deep_search/sft/4-1_1.1k_cleaned_data_871_doc_by_itself_QwQ-32B_2.log +14 -0
- deep_search/sft/4-24__doc_by_itself__add_math871_after_search.log +4 -0
- deep_search/sft/4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_Qwen2.5-7B-Instruct_first_math.log +43 -0
- deep_search/sft/4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_qwen7b_sft_871_checkpoint-78_add_math871_after_search_2.log +22 -0
- deep_search/sft/4-24_no_error_data_871_doc_by_itself_QwQ-32B.log +0 -0
- deep_search/sft/4-24_no_error_data_871_doc_by_itself_Qwen2.5-7B-Instruct_add_math871.log +159 -0
- deep_search/sft/4-25_no_error_data_871_doc_by_itself_QwQ-32B.log +488 -0
- deep_search/sft/4-4_no_error_data_871_doc_by_itself_Qwen2.5-32B-Instruct.log +25 -0
- deep_search/sft/5-5_ablation_subquery_1073_random_sample_871_Qwen2.5-7B-Instruct.log +114 -0
- deep_search/sft/ds_zero3.json +31 -0
- deep_search/sft/gen_data_1.log +4 -0
- deep_search/sft/math_eval.sh +32 -0
- deep_search/sft/mix_1.sh +317 -0
- deep_search/sft/mix_math_first.sh +61 -0
- deep_search/sft/mix_math_multi_node.sh +66 -0
- deep_search/sft/mix_re.sh +175 -0
- deep_search/sft/nohup.out +41 -0
- deep_search/sft/sft_1.py +256 -0
- deep_search/sft/sft_2_math.py +260 -0
- deep_search/sft/sft_2_math_sht_new_prompt.py +260 -0
- deep_search/sft/sft_logs/3-28.log +0 -0
- deep_search/sft/sft_logs/4-1.log +0 -0
- deep_search/sft/sft_logs/4-24-math_after_search.log +0 -0
- deep_search/sft/sft_logs/4-24-mixed_math.log +0 -0
deep_search/SimpleDeepSearcher/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 RUCAIBox
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
deep_search/SimpleDeepSearcher/README.md
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
<h1 align="center"> SimpleDeepSearcher: Deep Information Seeking via Web-Powered Reasoning Trajectory Synthesis</a></h1>
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
<div align="center">
|
| 6 |
+
<a href="https://github.com/RUCAIBox/SimpleDeepSearcher/blob/main/LICENSE"><img src="https://img.shields.io/badge/Code_License-MIT-blue" alt="license"></a>
|
| 7 |
+
<a href="https://github.com/RUCAIBox/SimpleDeepSearcher/blob/main/LICENSE"><img src="https://img.shields.io/badge/Model_License-MIT-blue" alt="license"></a>
|
| 8 |
+
<a href="https://huggingface.co/XXsongLALA"><img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-blue?color=8A2BE2"></a>
|
| 9 |
+
<a href="https://arxiv.org/pdf/2503.05592" target="_blank"><img src=https://img.shields.io/badge/arXiv-b5212f.svg?logo=arxiv></a>
|
| 10 |
+
|
| 11 |
+
</div>
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
<p align="center">
|
| 15 |
+
<img src="./assets/simplelog.jpg" alt="Example Image" width="550"/>
|
| 16 |
+
</p>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
<h5 align="center"> If you like our project, please give us a star ⭐ on GitHub for the latest update.</h5>
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# 💡 Overview
|
| 23 |
+
|
| 24 |
+
<p align="center">
|
| 25 |
+
<img src="./assets/benchmarksfigure_7B.png" alt="Image 1" width="400"/>
|
| 26 |
+
<img src="./assets/benchmarksfigure_7B.png" alt="Image 2" width="400"/>
|
| 27 |
+
</p>
|
| 28 |
+
|
| 29 |
+
Recently, researchers have begun to explore methods for enhancing LLMs’ complex reasoning capabilities in information retrieval tasks. These approaches typically leverage reinforcement learning to stimulate autonomous retrieval during the reasoning process. Notably, such methods require only the raw questions as input, without the need for high-quality answer supervision. While effective in improving model performance, reinforcement learning incurs substantial training overhead. Moreover, many current approaches rely on local retrieval databases; transitioning to web-based search systems further reduces training efficiency. Additionally, methods employing online search often demand significant computational resources, hindering the broader adoption of complex reasoning-based retrieval systems. This motivates the need for a solution that enables powerful reasoning with minimal training cost.
|
| 30 |
+
|
| 31 |
+
**To this end, we propose *SimpleDeepSearcher*, a framework designed to stimulate autonomous retrieval during complex reasoning via knowledge distillation and self-distillation. The goal is to achieve efficient and effective training using only a small amount of data.** Despite its conceptual simplicity, constructing high-quality training data presents two key challenges. On the query side, existing open-source datasets often suffer from issues such as imbalanced topic distributions, repetitive structures, and insufficient complexity, limiting their utility in eliciting deep retrieval behavior. On the response side, solving deep retrieval tasks requires effectively decomposing complex queries while avoiding invalid reasoning steps and overthinking—objectives that are fundamentally distinct from those in traditional mathematical or logical reasoning tasks.
|
| 32 |
+
|
| 33 |
+
To address these challenges, we first perform fine-grained filtering of existing open-source datasets based on multiple dimensions including domain coverage, structural variety, and question complexity. This ensures that the selected queries exhibit diverse domains and structures, as well as a balanced difficulty distribution. Next, we perform rollout sampling using large reasoning models in a real-world retrieval environment. The resulting traces are then filtered again based on criteria such as format, subquery quality, question difficulty, and reasoning path integrity, in order to eliminate redundant reasoning. The curated data is subsequently used to train multiple models, enabling us to explore the potential of distillation techniques in fostering autonomous retrieval capabilities.
|
| 34 |
+
|
| 35 |
+
**We evaluate our proposed method on five challenging benchmarks—2WikiMultiHopQA, Bamboogle, Musique, FRAMES, and GAIA—and our results demonstrate that *SimpleDeepSearcher* consistently outperforms a range of recent state-of-the-art baselines.**
|
| 36 |
+
|
| 37 |
+
**We release all training and inference code, along with model checkpoints. Additionally, we provide two highly efficient supervised fine-tuning datasets of 0.5k and 0.8k examples. The 0.5k dataset features more direct reasoning paths, while the 0.8k dataset includes richer reflection and rethinking processes. A detailed technical report will be released in the near future**.
|
| 38 |
+
|
| 39 |
+
- **Models**:
|
| 40 |
+
- Qwen-7B-SimpleDeepSearcher: https://huggingface.co/RUC-AIBOX/Qwen-7B-SimpleDeepSearcher
|
| 41 |
+
- Qwen-32B-SimpleDeepSearcher: https://huggingface.co/RUC-AIBOX/Qwen-32B-SimpleDeepSearcher
|
| 42 |
+
- Dpsk-Distilled-Qwen-32B-SimpleDeepSearcher: [https://huggingface.co/RUC-AIBOX/Dpsk-Distilled-Qwen-32B-SimpleDeepSearcher](https://huggingface.co/RUC-AIBOX/Dpsk-Distilled-Qwen-32B-SimpleDeepSearcher)
|
| 43 |
+
- QwQ-32B-SimpleDeepSearcher: https://huggingface.co/RUC-AIBOX/QwQ-32B-SimpleDeepSearcher
|
| 44 |
+
- **Training Data**:
|
| 45 |
+
- 0.5k: https://huggingface.co/datasets/RUC-AIBOX/0.5k-data-SimpleDeepSearcher
|
| 46 |
+
- 0.8k: https://huggingface.co/datasets/RUC-AIBOX/0.8k-data-SimpleDeepSearcher
|
| 47 |
+
- **GitHub**: [https://github.com/RUCAIBox/SimpleDeepSearcher](https://github.com/RUCAIBox/SimpleDeepSearcher)
|
| 48 |
+
- **Notion**: [https://sweet-walkover-f9b.notion.site/SimpleDeepSearcher-Deep-Information-Seeking-via-Web-Powered-Reasoning-Trajectory-Synthesis-1d1c27a43d7a801090d8ce1a75b2d6d0?pvs=4](https://sweet-walkover-f9b.notion.site/SimpleDeepSearcher-Deep-Information-Seeking-via-Web-Powered-Reasoning-Trajectory-Synthesis-1d1c27a43d7a801090d8ce1a75b2d6d0?pvs=4)
|
| 49 |
+
|
| 50 |
+
# ✨ Key Insights
|
| 51 |
+
1. **Data Synthesis Based on Real-World Web Environments**: We design a large-scale data synthesis pipeline grounded in authentic open-web environments, enhancing the diversity and realism of training corpora. This significantly improves the model’s ability to retrieve and integrate information in complex search tasks.
|
| 52 |
+
2. **Rigorous Data Filtering Strategy**: We introduce a task-specific QA pair filtering method tailored for search-oriented training, enabling fine-grained selection of high-quality training samples.
|
| 53 |
+
3. **Efficient Performance Boost with Limited Data**: Using only 871 distilled examples, our 7B-scale model surpasses existing models trained via reinforcement learning. Notably, Qwen-32B-Instruct approaches the performance of QwQ-32B, which possesses built-in retrieval capabilities, while also enabling further performance gains for QwQ-32B itself.
|
| 54 |
+
4. **Generalization to OOD Evaluation Sets**: Training on conventional multi-hop datasets leads to strong generalization capabilities on out-of-distribution (OOD) benchmarks, including FRAMES and GAIA.
|
| 55 |
+
5. **Analysis of Post-Distillation Reinforcement Learning**: We further finetune the distilled 7B model with reinforcement learning and provide an in-depth analysis of the training dynamics and performance impact.
|
| 56 |
+
|
| 57 |
+
# ✨ Methodology and Technical Framework
|
| 58 |
+
## Data Synthesis
|
| 59 |
+
**In contrast to traditional Retrieval-Augmented Generation (RAG) systems that rely on closed and static knowledge bases, our approach situates the retrieval and generation process within the open and dynamic environment of the real-world internet.** This setting is designed to enhance the model's capability for information awareness and integration in authentic search scenarios.
|
| 60 |
+
|
| 61 |
+
Specifically, instead of utilizing a controlled and curated document collection, our system operates within the open web—an inherently noisy, diverse, and constantly evolving information space. Web content varies widely in format (e.g., encyclopedic entries, forums, news articles, advertisements), quality, and structure, often consisting of unstructured text and heterogeneous linguistic styles. Such a complex environment more accurately reflects real-world user conditions and imposes greater demands on the model's ability to extract, synthesize, and reason over information.
|
| 62 |
+
|
| 63 |
+
Against this backdrop, we develop an automated data synthesis pipeline based on the popular "web search – content filtering – information summarization – answer synthesis" framework. Leveraging a strong reasoning model (QwQ-32B), we conduct large-scale rollouts in the real web environment: for each curated question, the model generates multiple high-quality answers grounded in search engine results. This process requires the model not only to construct precise and relevant subqueries, but also to identify and extract salient information from noisy, redundant, or even contradictory web sources, thereby producing accurate, concise, and well-structured outputs.
|
| 64 |
+
|
| 65 |
+
Through this method of data synthesis grounded in real-world web contexts, we significantly improve the diversity and authenticity of training samples. This, in turn, provides more practically aligned supervision signals for fine-tuning, and offers a novel pathway for enhancing the retrieval capabilities of large language models in open-domain question answering tasks.
|
| 66 |
+
|
| 67 |
+
## Data Source Selection and Filtering
|
| 68 |
+
We selected a combination of single-hop and multi-hop datasets, including *Natural Questions*, *HotpotQA*, *2WikiMultihopQA*, *Musique*, *SimpleQA*, and *MultiHop-RAG*. These datasets provide a rich variety of query types, ensuring broad coverage across training data.
|
| 69 |
+
|
| 70 |
+
To ensure the quality and diversity of the training samples, we developed a fine-grained filtering methodology for both questions and responses. Specifically:
|
| 71 |
+
|
| 72 |
+
**1. Question Filtering**
|
| 73 |
+
|
| 74 |
+
**Filtering criteria include:**
|
| 75 |
+
|
| 76 |
+
- **Domain Diversity**: Ensuring that questions span across a wide range of knowledge domains.
|
| 77 |
+
- **Keyword Diversity**: Keywords refer to the key entities, attributes, and relations mentioned in the question. This helps reduce redundancy and enhances the diversity of question patterns.
|
| 78 |
+
- **Coverage of Specific Interrogative Words**: Increasing the complexity of the questions by emphasizing the use of varied interrogative terms.
|
| 79 |
+
|
| 80 |
+
We utilized QwQ-32B to annotate each question with its domain and extracted keywords. The number of specific interrogative words in each question was also quantified. The detailed question filtering procedure is illustrated in following Figure:
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
<p align="center">
|
| 84 |
+
<img src="./assets/filter_algm.png" alt="Example Image" width="500"/>
|
| 85 |
+
</p>
|
| 86 |
+
|
| 87 |
+
**2. Response Filtering**
|
| 88 |
+
|
| 89 |
+
We impose strict constraints on both the format and content of generated responses, retaining only those that satisfy all predefined criteria.
|
| 90 |
+
|
| 91 |
+
**Filtering criteria include:**
|
| 92 |
+
|
| 93 |
+
- **Format compliance:** Responses exhibiting mixed Chinese-English usage or malformed special formatting are discarded to ensure consistency and clarity.
|
| 94 |
+
- **Sub-query validity:** We prioritize responses containing fewer redundant retrievals and lower content overlap between sub-queries, thereby improving overall retrieval efficiency.
|
| 95 |
+
- **Question difficulty:** We deliberately select questions with a low correct response rate across multiple rollout trials to enhance the model’s ability to handle complex queries.
|
| 96 |
+
- **Reasoning path control:** We enforce tight restrictions on the use of reflective expressions (e.g., *alternatively*, *wait*, etc.) and limit single-pass reasoning length to avoid ineffective or irrelevant reasoning chains.
|
| 97 |
+
|
| 98 |
+
For the selected open-source dataset, we first apply the proposed question filtering algorithm to identify questions. This process yields a collection of questions that are rich in domain variety, diverse in patterns, and high in complexity. Subsequently, based on the data synthesis strategy designed to mimic real-world internet environments, we use the QwQ-32B model to conduct large-scale rollouts for each selected question, generating multiple candidate responses. Finally, we apply the above filtering criteria to rigorously select high-quality responses from the candidate pool. Through this multi-stage process, we construct a dataset comprising 871 high-quality question-response pairs.
|
| 99 |
+
|
| 100 |
+
# 📄 Evaluation
|
| 101 |
+
## Settings
|
| 102 |
+
- **Benchmarks**: We evaluate our model on five benchmarks: *2WikiMultiHopQA*, *Bamboogle*, *Musique*, *FRAMES*, and *GAIA*. Among them, *2WikiMultiHopQA* and *Musique* are considered in-domain datasets, as their training sets were used during model development. In contrast, *Bamboogle*, *FRAMES*, and *GAIA* are treated as out-of-domain datasets. Notably, *FRAMES* is designed to assess factual consistency, retrieval accuracy, and reasoning capability, while *GAIA* focuses on the model’s ability to solve complex real-world problems. We adopt the benchmark settings from [R1-Searcher](https://arxiv.org/pdf/2503.05592) for *2WikiMultiHopQA*, *Bamboogle*, and *Musique*. For *FRAMES*, we utilize the full test set, and for *GAIA*, we adopt the same evaluation subset as selected in [**WebThinker**](https://github.com/RUC-NLPIR/WebThinker).
|
| 103 |
+
- **Evaluation Metrics**: We use both F1 score and LLM-as-Judge (LasJ) as evaluation metrics. For LLM-as-Judge, judgments for *2WikiMultiHopQA*, *Bamboogle*, *Musique*, and *FRAMES* are made using GPT-4o-mini, while *GAIA* is evaluated using Qwen2.5-72B-Instruct (aligned with [WebThinker](https://github.com/RUC-NLPIR/WebThinker)).
|
| 104 |
+
- **Backbones**: The models used as backbones include Qwen-2.5-7B-Instruct, Qwen-2.5-32B-Instruct, Deepseek-Distilled-Qwen-2.5-32B, and QwQ-32B. These cover models of different sizes and inference models.
|
| 105 |
+
- **Baselines**: We compare our approach against several baselines: *Direct Generation*, *Standard RAG*, *Search-o1*, *R1-Searcher*, *DeepResearcher*, and *WebThinker*. For *Standard RAG*, we employ the 2019 Wikipedia dump provided by KILT as the local dense retrieval corpus. This choice is motivated by the complexity of the queries, which makes effective retrieval via real-time web search challenging. All other retrieval-based methods use Google Search API for online document retrieval.
|
| 106 |
+
- **Training Details**: Supervised fine-tuning is performed on 871 rigorously curated examples using Qwen2.5-7B-Instruct, Qwen2.5-32B-Instruct, DeepSeek-R1-Distill-Qwen-32B, and QwQ-32B. During fine-tuning, external retrieval documents are masked to avoid learning from noisy or spurious information.
|
| 107 |
+
|
| 108 |
+
## Main Results
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
<p align="center">
|
| 112 |
+
<img src="./assets/benchmark_table_1.png" alt="Example Image" width="600"/>
|
| 113 |
+
</p>
|
| 114 |
+
<p align="center">
|
| 115 |
+
<img src="./assets/benchmark_table_2.png" alt="Example Image" width="600"/>
|
| 116 |
+
</p>
|
| 117 |
+
|
| 118 |
+
- **Significant Overall Performance Gains**: Compared with existing baseline approaches such as Directly Gen, Standard RAG, and Search-o1, SimpleDeepSearcher demonstrates a clear performance advantage across all five QA benchmarks.
|
| 119 |
+
- **Maintaining Generalization Ability**: Among the evaluated benchmarks, 2WikiMultihopQA and Musique serve as in-domain datasets, while Bamboogle, Frames, and GAIA represent out-of-domain scenarios. Our method achieves superior generalization, particularly on the more challenging Frames and GAIA datasets, significantly outperforming other existing methods. These results highlight the robustness and strong generalization capability of the trained model.
|
| 120 |
+
- **Consistent Improvements Across Model Scales**: SimpleDeepSearcher consistently improves performance across a range of model sizes, including both smaller models such as Qwen2.5-7B-Instruct and larger models like Qwen2.5-32B-Instruct, DeepSeek-R1-Distill-Qwen-7B, and QwQ-32B. This suggests that our proposed framework of distillation and self-distillation generalizes effectively across different model capacities.
|
| 121 |
+
- **Enhanced Retrieval Efficiency and Reasoning Compactness**: The trained models exhibit more efficient search invocation and streamlined reasoning processes. SimpleDeepSearcher not only improves the model's ability to decompose complex queries and generate precise and effective sub-queries, but also significantly reduces redundant inference. The resulting decision pathways are more concise, transparent, and coherent.
|
| 122 |
+
|
| 123 |
+
# 🌟 Analysis of Supervised Fine-Tuning (SFT)
|
| 124 |
+
## Impact of Data Filtering
|
| 125 |
+
We first compare the performance of the QwQ-32B model trained on a strictly filtered dataset of 871 samples against a model trained on a larger but unfiltered dataset of 2,699 samples. The experimental results indicate the following:
|
| 126 |
+
|
| 127 |
+
- The model trained on the strictly filtered 871 samples consistently outperforms the one trained on the unfiltered 2,699 samples, suggesting that high-quality data has a more pronounced effect in enhancing the model’s generalization capabilities.
|
| 128 |
+
- Compared to the model trained on the unfiltered data, the one trained on the strictly filtered dataset exhibits a higher average number of search steps, but significantly fewer occurrences of alternative reasoning paths and shorter response lengths. This indicates that the filtered data is more effective in prompting the model to decompose complex queries efficiently while avoiding unnecessary reasoning steps and overthinking.
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
<p align="center">
|
| 132 |
+
<img src="./assets/sft_analysis_1.png" alt="Example Image" width="650"/>
|
| 133 |
+
</p>
|
| 134 |
+
|
| 135 |
+
<p align="center">
|
| 136 |
+
<img src="./assets/sft_analysis_2.png" alt="Example Image" width="650"/>
|
| 137 |
+
</p>
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
## Impact of Webpage Summarization Models
|
| 141 |
+
Since retrieved webpage content is often lengthy and contains substantial noise, directly inputting such content into the model for reasoning can easily exceed the context window and introduce irrelevant information that impairs reasoning performance. Therefore, it is necessary to summarize and condense the webpage content beforehand. We conducted a comparative analysis between using the model's own summarization capabilities and employing GPT-4o-mini as an external summarization model. Experimental findings demonstrate that the choice of summarization model has a significant impact on downstream performance. For the SFT-tuned Qwen-7B-Instruct model, using GPT-4o-mini as the summarizer consistently outperforms the model's own summaries across all datasets, yielding an average improvement of approximately 10 percentage points. A similar trend is observed for SFT-tuned Qwen-32B-Instruct on all datasets except 2Wiki. In contrast, for the SFT-tuned QwQ-32B model, self-generated summaries result in better performance.
|
| 142 |
+
<p align="center">
|
| 143 |
+
<img src="./assets/sft_analysis_3.png" alt="Example Image" width="650"/>
|
| 144 |
+
</p>
|
| 145 |
+
|
| 146 |
+
# 🌟 Continued RL Training Based on a 7B-SFT Model
|
| 147 |
+
## Settings
|
| 148 |
+
1. **SFT Dataset:** It is worth noting that despite our efforts to limit the frequency of analytical discourse markers (e.g., Alternatively, Wait, Hmm) in the distilled data, the 7B model still exhibited repetitive generation and overthinking tendencies after distillation. These behaviors led to slower convergence during reinforcement learning. To address this, we performed a second round of data filtering on the original 0.8k dataset and removed samples containing the term "Alternatively", resulting in a 0.5k subset used for supervised fine-tuning (SFT) and subsequent distillation. While this reduction may lead to a slight degradation in reasoning ability, we are currently training an RL model based on the full 0.8k dataset. For updates, please refer to this [repository](https://github.com/RUCAIBox/SimpleDeepSearcher).
|
| 149 |
+
2. **RL Dataset:** The SFT-tuned model was used to perform rollout sampling on the training sets of 2Wiki and HotpotQA. For each question, 8 rollouts were generated. We selected 2,480 samples from those questions with 1 to 6 correct answers to construct the RL training dataset.
|
| 150 |
+
3. **Reward Model:** Our reward function consists of two components: an answer reward and a format penalty.
|
| 151 |
+
- The answer reward is computed as the F1 score between the predicted answer and the reference answer.
|
| 152 |
+
- The format penalty is a discrete penalty of -2 applied if any of the following conditions are met (0 otherwise):
|
| 153 |
+
1. **Self-Retrieved Content:** The model fabricates external documents.
|
| 154 |
+
2. **Contains Gibberish:** The output contains nonsensical or corrupted text.
|
| 155 |
+
3. **Too Many Analytical Terms:** More than 5 instances of analytical markers such as Alternatively, Wait, or Hmm are present.
|
| 156 |
+
4. **No Boxed Answers:** The model performs more than 8 retrieval steps, or the analytical content between two retrievals exceeds 8,096 tokens.
|
| 157 |
+
## Evaluation
|
| 158 |
+
As shown in the table below, models distilled with the 0.5k dataset underperform those using the 0.8k dataset in terms of baseline reasoning ability. However, reinforcement learning significantly boosts performance in both cases, demonstrating its capacity to enhance the model’s autonomous retrieval abilities. RL training for the model distilled with the full 0.8k dataset is ongoing. Please refer to our repository for further updates.
|
| 159 |
+
<p align="center">
|
| 160 |
+
<img src="./assets/rl_analysis_1.png" alt="Example Image" width="650"/>
|
| 161 |
+
</p>
|
| 162 |
+
|
| 163 |
+
## Analysis
|
| 164 |
+
### Continuous Increase in Completion Rate and Reward, Gradual Decrease in Format Penalty
|
| 165 |
+
As shown in the figure below, all format penalties gradually decrease as training progresses, while both answer reward and total reward exhibit an overall upward trend. The completion rate also increases correspondingly. These results indicate that reinforcement learning is effective in enhancing the model's ability to generate responses with higher accuracy while adhering to the required formatting standards.
|
| 166 |
+
|
| 167 |
+
<p align="center">
|
| 168 |
+
<img src="./assets/rl_analysis_2.png" alt="Example Image" width="650"/>
|
| 169 |
+
</p>
|
| 170 |
+
|
| 171 |
+
<p align="center">
|
| 172 |
+
<img src="./assets/rl_analysis_3.png" alt="Example Image" width="650"/>
|
| 173 |
+
</p>
|
| 174 |
+
|
| 175 |
+
### Significant Reduction in Response Length and Slight Decrease in Retrieval Frequency
|
| 176 |
+
As illustrated in the figure below, 0.8k-sft-rl refers to the model fine-tuned with 0.8k instances via supervised fine-tuning (SFT) as the backbone for reinforcement learning (RL); 0.5k-sft-rl uses a backbone trained with 0.5k SFT instances; 0.5k-sft-rl-no-len-punishment shares the same backbone as 0.5k-sft-rl but removes the “Too Many Analytical Terms” penalty during RL.
|
| 177 |
+
|
| 178 |
+
Several observations can be made:
|
| 179 |
+
|
| 180 |
+
1. Models trained with 0.8k-sft as the RL backbone generally receive lower rewards. This is primarily due to the fact that, despite explicit constraints on the number of analytical terms in the distilled data, the 7B model still exhibits overthinking tendencies post-distillation, often triggering the “Too Many Analytical Terms” penalty and incurring format-related deductions.
|
| 181 |
+
2. Regardless of whether the reward model incorporates the “Too Many Analytical Terms” penalty, the overall trend shows a consistent reduction in response length. This suggests that reinforcement learning effectively guides the model to produce more concise and precise answers. However, this gain in succinctness may come at the expense of the distilled model's original reasoning capabilities.
|
| 182 |
+
|
| 183 |
+
<p align="center">
|
| 184 |
+
<img src="./assets/rl_analysis_4.png" alt="Example Image" width="800"/>
|
| 185 |
+
</p>
|
| 186 |
+
|
| 187 |
+
# 🏃 Quick Start
|
| 188 |
+
Coming Soon...
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
# 📄 Citation
|
| 192 |
+
Please kindly cite our report if they are helpful for your research.
|
| 193 |
+
|
| 194 |
+
```
|
| 195 |
+
@article{SimpleDeepSearcher,
|
| 196 |
+
title={SimpleDeepSearcher: Deep Information Seeking via Web-Powered Reasoning Trajectory Synthesis},
|
| 197 |
+
author={Shuang Sun*, Huatong Song*, Yuhao Wang, Ruiyang Ren, Jinhao Jiang, Junjie Zhang, Lei Fang, Zhongyuan Wang, Wayne Xin Zhao, Ji-Rong Wen},
|
| 198 |
+
url={https://github.com/RUCAIBox/SimpleDeepSearcher},
|
| 199 |
+
year={2025}
|
| 200 |
+
}
|
| 201 |
+
```
|
| 202 |
+
|
| 203 |
+
# 📄 License
|
| 204 |
+
|
| 205 |
+
This project is released under the [MIT License](LICENSE).
|
| 206 |
+
|
| 207 |
+
# 📞 Contact
|
| 208 |
+
|
| 209 |
+
For any questions or feedback, please reach out to us at [sunshuanguns@gmail.com](sunshuanguns@gmail.com) or [songhuatong123@ruc.edu.cn](songhuatong123@ruc.edu.cn).
|
deep_search/data_from_zhiyuan/data_for_dpo/3k_question/domain_counts.json
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"business": 137,
|
| 3 |
+
"sports": 137,
|
| 4 |
+
"geography": 137,
|
| 5 |
+
"film": 137,
|
| 6 |
+
"education": 137,
|
| 7 |
+
"military": 137,
|
| 8 |
+
"media": 137,
|
| 9 |
+
"television": 137,
|
| 10 |
+
"religion": 136,
|
| 11 |
+
"aviation": 136,
|
| 12 |
+
"music": 136,
|
| 13 |
+
"biology": 136,
|
| 14 |
+
"literature": 136,
|
| 15 |
+
"people": 136,
|
| 16 |
+
"technology": 136,
|
| 17 |
+
"genealogy": 136,
|
| 18 |
+
"football": 136,
|
| 19 |
+
"politics": 136,
|
| 20 |
+
"history": 136,
|
| 21 |
+
"entertainment": 136,
|
| 22 |
+
"biography": 136,
|
| 23 |
+
"other": 136
|
| 24 |
+
}
|
deep_search/data_from_zhiyuan/data_for_dpo/3k_question/domain_keypoints_distribution_without_remove_dup.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/data_from_zhiyuan/data_for_dpo/3k_question/find_dup.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
def read_json(file_path):
    """Read a JSON document from *file_path*, log its item count, and return it."""
    with open(file_path, "r", encoding="utf-8") as fh:
        payload = json.load(fh)
    print(f"load from {file_path}, total item: {len(payload)}")
    return payload
|
| 9 |
+
|
| 10 |
+
def read_jsonl(file_path):
    """Read a JSON Lines file, log its record count, and return the records."""
    with open(file_path, "r", encoding="utf-8") as fh:
        records = [json.loads(raw.strip()) for raw in fh]
    print(f"load from {file_path}, total item: {len(records)}")
    return records
|
| 18 |
+
|
| 19 |
+
def write_json(data, file_path):
    """Write *data* to *file_path* as pretty-printed, non-ASCII-preserving JSON."""
    with open(file_path, "w", encoding="utf-8") as fh:
        json.dump(data, fh, ensure_ascii=False, indent=4)
    print(f"write to {file_path}, total item: {len(data)}")
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
file1 = "/share/project/sunshuang/deep_search/data_for_dpo/3k_question/17w_select_3k_for_dpo.json"
file2 = "/share/project/sunshuang/deep_search/data_for_dpo/3k_question/qwqsft_chosen_qwen7bsft_rejected_484.json"

data1 = read_json(file1)
data2 = read_json(file2)

# Index data1 by question text (this also de-duplicates questions within data1).
question1 = {}
for item in data1:
    question1[item["question"]] = item

print(f"question1 length: {len(question1)}")

# Drop from question1 every question that also appears in data2.
dup_data_cnt = 0
for item in data2:
    if item["question"] in question1:
        dup_data_cnt += 1
        question1.pop(item["question"])

# BUG FIX: previously the "not dup data cnt" line was printed before
# not_dup_data was filled, so it always reported 0. Build the surviving
# list first, then report both counts.
not_dup_data = list(question1.values())
print(f"dup data cnt: {dup_data_cnt}")
print(f"not dup data cnt: {len(not_dup_data)}")

output_file = f"/share/project/sunshuang/deep_search/data_for_dpo/3k_question/not_dup_data_{len(not_dup_data)}.json"
write_json(not_dup_data, output_file)
|
| 55 |
+
|
| 56 |
+
|
deep_search/data_from_zhiyuan/data_for_dpo/3k_question/load_data.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
def read_json(file_path):
    """Load a JSON document from *file_path*; log and return its contents."""
    with open(file_path, "r", encoding="utf-8") as fh:
        loaded = json.load(fh)
    print(f"load from {file_path}, total item: {len(loaded)}")
    return loaded
|
| 9 |
+
|
| 10 |
+
def read_jsonl(file_path):
    """Parse every line of a JSON Lines file into a list of objects."""
    with open(file_path, "r", encoding="utf-8") as fh:
        parsed = [json.loads(row.strip()) for row in fh]
    print(f"load from {file_path}, total item: {len(parsed)}")
    return parsed
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Load the merged RL dataset tagged with domain / key-point / keyword counts.
# read_json also prints the item count, so running this file doubles as a
# quick sanity check on the dataset size.
file = "/share/project/sunshuang/deep_search/data_for_rl/tagged_domain_keypoints/merged_data_tagged_domain_keypoints_keywords_count.json"
data = read_json(file)
|
deep_search/data_from_zhiyuan/data_for_dpo/3k_question/source_counts.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"MultiHopRAG": 112,
|
| 3 |
+
"hotpot": 2484,
|
| 4 |
+
"musique": 167,
|
| 5 |
+
"2wiki_compositional": 237
|
| 6 |
+
}
|
deep_search/data_from_zhiyuan/data_for_rl/MultiHopRAG_only_qa.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/data_from_zhiyuan/data_for_rl/domain_counts.json
ADDED
|
@@ -0,0 +1,546 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"film": 65685,
|
| 3 |
+
"history": 21913,
|
| 4 |
+
"music": 21613,
|
| 5 |
+
"geography": 15521,
|
| 6 |
+
"sports": 7353,
|
| 7 |
+
"television": 6374,
|
| 8 |
+
"politics": 5583,
|
| 9 |
+
"literature": 2876,
|
| 10 |
+
"biography": 2521,
|
| 11 |
+
"business": 2506,
|
| 12 |
+
"technology": 1964,
|
| 13 |
+
"education": 1835,
|
| 14 |
+
"people": 1710,
|
| 15 |
+
"genealogy": 977,
|
| 16 |
+
"football": 937,
|
| 17 |
+
"entertainment": 827,
|
| 18 |
+
"media": 826,
|
| 19 |
+
"military": 638,
|
| 20 |
+
"religion": 610,
|
| 21 |
+
"aviation": 587,
|
| 22 |
+
"biology": 507,
|
| 23 |
+
"gaming": 494,
|
| 24 |
+
"law": 493,
|
| 25 |
+
"publishing": 459,
|
| 26 |
+
"royalty": 413,
|
| 27 |
+
"art": 405,
|
| 28 |
+
"comics": 393,
|
| 29 |
+
"theater": 342,
|
| 30 |
+
"family": 332,
|
| 31 |
+
"theatre": 306,
|
| 32 |
+
"government": 290,
|
| 33 |
+
"transportation": 290,
|
| 34 |
+
"motorsport": 270,
|
| 35 |
+
"baseball": 257,
|
| 36 |
+
"physics": 253,
|
| 37 |
+
"crime": 219,
|
| 38 |
+
"architecture": 218,
|
| 39 |
+
"automotive": 216,
|
| 40 |
+
"economics": 195,
|
| 41 |
+
"opera": 192,
|
| 42 |
+
"finance": 189,
|
| 43 |
+
"american football": 180,
|
| 44 |
+
"basketball": 175,
|
| 45 |
+
"philosophy": 153,
|
| 46 |
+
"astronomy": 153,
|
| 47 |
+
"automobile": 152,
|
| 48 |
+
"video games": 147,
|
| 49 |
+
"games": 147,
|
| 50 |
+
"science": 143,
|
| 51 |
+
"mythology": 141,
|
| 52 |
+
"motorsports": 136,
|
| 53 |
+
"university": 132,
|
| 54 |
+
"dance": 128,
|
| 55 |
+
"sport": 121,
|
| 56 |
+
"wrestling": 118,
|
| 57 |
+
"chemistry": 113,
|
| 58 |
+
"fashion": 112,
|
| 59 |
+
"linguistics": 107,
|
| 60 |
+
"olympics": 105,
|
| 61 |
+
"academia": 104,
|
| 62 |
+
"radio": 99,
|
| 63 |
+
"hockey": 98,
|
| 64 |
+
"medicine": 94,
|
| 65 |
+
"boxing": 94,
|
| 66 |
+
"journalism": 78,
|
| 67 |
+
"space": 76,
|
| 68 |
+
"organization": 72,
|
| 69 |
+
"culture": 72,
|
| 70 |
+
"food": 69,
|
| 71 |
+
"retail": 68,
|
| 72 |
+
"geology": 68,
|
| 73 |
+
"cricket": 62,
|
| 74 |
+
"mma": 60,
|
| 75 |
+
"cuisine": 59,
|
| 76 |
+
"psychology": 56,
|
| 77 |
+
"society": 53,
|
| 78 |
+
"mathematics": 53,
|
| 79 |
+
"company": 52,
|
| 80 |
+
"meteorology": 51,
|
| 81 |
+
"nobility": 49,
|
| 82 |
+
"language": 48,
|
| 83 |
+
"energy": 48,
|
| 84 |
+
"engineering": 47,
|
| 85 |
+
"events": 45,
|
| 86 |
+
"manga": 44,
|
| 87 |
+
"archaeology": 41,
|
| 88 |
+
"culinary": 38,
|
| 89 |
+
"tennis": 38,
|
| 90 |
+
"anime": 37,
|
| 91 |
+
"animation": 37,
|
| 92 |
+
"martial arts": 37,
|
| 93 |
+
"brewing": 37,
|
| 94 |
+
"astronautics": 35,
|
| 95 |
+
"beer": 35,
|
| 96 |
+
"banking": 33,
|
| 97 |
+
"industry": 33,
|
| 98 |
+
"health": 32,
|
| 99 |
+
"soccer": 32,
|
| 100 |
+
"nobel prize": 31,
|
| 101 |
+
"demographics": 31,
|
| 102 |
+
"poker": 31,
|
| 103 |
+
"sports betting": 31,
|
| 104 |
+
"folklore": 30,
|
| 105 |
+
"nfl": 30,
|
| 106 |
+
"anthropology": 30,
|
| 107 |
+
"sociology": 30,
|
| 108 |
+
"fiction": 28,
|
| 109 |
+
"ice hockey": 28,
|
| 110 |
+
"person": 27,
|
| 111 |
+
"ballet": 26,
|
| 112 |
+
"firearms": 26,
|
| 113 |
+
"arts": 25,
|
| 114 |
+
"comedy": 25,
|
| 115 |
+
"casino": 24,
|
| 116 |
+
"telecommunications": 24,
|
| 117 |
+
"healthcare": 24,
|
| 118 |
+
"theme parks": 24,
|
| 119 |
+
"athletics": 23,
|
| 120 |
+
"pageant": 23,
|
| 121 |
+
"amusement parks": 23,
|
| 122 |
+
"beverage": 23,
|
| 123 |
+
"environment": 22,
|
| 124 |
+
"theme park": 22,
|
| 125 |
+
"agriculture": 22,
|
| 126 |
+
"law enforcement": 21,
|
| 127 |
+
"formula one": 20,
|
| 128 |
+
"gambling": 20,
|
| 129 |
+
"real estate": 19,
|
| 130 |
+
"navy": 19,
|
| 131 |
+
"rugby": 19,
|
| 132 |
+
"design": 18,
|
| 133 |
+
"books": 17,
|
| 134 |
+
"marketing": 17,
|
| 135 |
+
"amusement_parks": 17,
|
| 136 |
+
"cryptocurrency": 17,
|
| 137 |
+
"advertising": 16,
|
| 138 |
+
"museums": 16,
|
| 139 |
+
"college football": 16,
|
| 140 |
+
"computer science": 15,
|
| 141 |
+
"alcohol": 15,
|
| 142 |
+
"restaurants": 14,
|
| 143 |
+
"golf": 14,
|
| 144 |
+
"horse racing": 13,
|
| 145 |
+
"photography": 13,
|
| 146 |
+
"pharmaceuticals": 12,
|
| 147 |
+
"philanthropy": 12,
|
| 148 |
+
"hospitality": 12,
|
| 149 |
+
"space exploration": 12,
|
| 150 |
+
"tv": 12,
|
| 151 |
+
"lottery": 12,
|
| 152 |
+
"broadcasting": 12,
|
| 153 |
+
"celebrity": 11,
|
| 154 |
+
"beauty pageant": 11,
|
| 155 |
+
"country music": 11,
|
| 156 |
+
"wwe": 11,
|
| 157 |
+
"military history": 10,
|
| 158 |
+
"event": 10,
|
| 159 |
+
"botany": 10,
|
| 160 |
+
"environmental science": 10,
|
| 161 |
+
"video game": 10,
|
| 162 |
+
"aerospace": 9,
|
| 163 |
+
"legal": 9,
|
| 164 |
+
"construction": 9,
|
| 165 |
+
"labor": 9,
|
| 166 |
+
"confectionery": 9,
|
| 167 |
+
"brewery": 8,
|
| 168 |
+
"railway": 8,
|
| 169 |
+
"hinduism": 8,
|
| 170 |
+
"internet": 8,
|
| 171 |
+
"ufc": 8,
|
| 172 |
+
"intelligence": 8,
|
| 173 |
+
"awards": 8,
|
| 174 |
+
"martial_arts": 8,
|
| 175 |
+
"theology": 8,
|
| 176 |
+
"ufology": 7,
|
| 177 |
+
"podcast": 7,
|
| 178 |
+
"stock market": 7,
|
| 179 |
+
"islam": 7,
|
| 180 |
+
"cycling": 7,
|
| 181 |
+
"criminal justice": 7,
|
| 182 |
+
"science fiction": 7,
|
| 183 |
+
"beverages": 7,
|
| 184 |
+
"racing": 7,
|
| 185 |
+
"bible": 7,
|
| 186 |
+
"terrorism": 7,
|
| 187 |
+
"christianity": 7,
|
| 188 |
+
"theme_parks": 6,
|
| 189 |
+
"accounting": 6,
|
| 190 |
+
"combat sports": 6,
|
| 191 |
+
"railroad": 6,
|
| 192 |
+
"chess": 6,
|
| 193 |
+
"disney": 6,
|
| 194 |
+
"transport": 6,
|
| 195 |
+
"musical": 6,
|
| 196 |
+
"currency": 6,
|
| 197 |
+
"art history": 5,
|
| 198 |
+
"spirituality": 5,
|
| 199 |
+
"amusement": 5,
|
| 200 |
+
"organized crime": 5,
|
| 201 |
+
"motorcycle": 5,
|
| 202 |
+
"horseracing": 5,
|
| 203 |
+
"spirits": 5,
|
| 204 |
+
"corporation": 5,
|
| 205 |
+
"esports": 5,
|
| 206 |
+
"motor racing": 5,
|
| 207 |
+
"manufacturing": 5,
|
| 208 |
+
"book": 5,
|
| 209 |
+
"fragrance": 5,
|
| 210 |
+
"international organization": 5,
|
| 211 |
+
"international organizations": 5,
|
| 212 |
+
"modeling": 4,
|
| 213 |
+
"toy": 4,
|
| 214 |
+
"telecom": 4,
|
| 215 |
+
"paleontology": 4,
|
| 216 |
+
"broadway": 4,
|
| 217 |
+
"charity": 4,
|
| 218 |
+
"role-playing games": 4,
|
| 219 |
+
"eurovision": 4,
|
| 220 |
+
"professional wrestling": 4,
|
| 221 |
+
"calendar": 4,
|
| 222 |
+
"organizations": 4,
|
| 223 |
+
"materials science": 4,
|
| 224 |
+
"figure skating": 4,
|
| 225 |
+
"automobile racing": 4,
|
| 226 |
+
"library": 4,
|
| 227 |
+
"australian football": 4,
|
| 228 |
+
"newspaper": 4,
|
| 229 |
+
"adult entertainment": 4,
|
| 230 |
+
"numismatics": 4,
|
| 231 |
+
"corporate": 4,
|
| 232 |
+
"nonprofit": 4,
|
| 233 |
+
"hotel": 4,
|
| 234 |
+
"museum": 4,
|
| 235 |
+
"social media": 4,
|
| 236 |
+
"magazine": 4,
|
| 237 |
+
"holiday": 4,
|
| 238 |
+
"careers": 4,
|
| 239 |
+
"standards": 4,
|
| 240 |
+
"mountaineering": 4,
|
| 241 |
+
"auto racing": 3,
|
| 242 |
+
"food industry": 3,
|
| 243 |
+
"performing arts": 3,
|
| 244 |
+
"amusement rides": 3,
|
| 245 |
+
"pornography": 3,
|
| 246 |
+
"statistics": 3,
|
| 247 |
+
"brands": 3,
|
| 248 |
+
"management": 3,
|
| 249 |
+
"nutrition": 3,
|
| 250 |
+
"maritime": 3,
|
| 251 |
+
"festival": 3,
|
| 252 |
+
"mixed martial arts": 3,
|
| 253 |
+
"ecology": 3,
|
| 254 |
+
"amusement park": 3,
|
| 255 |
+
"military technology": 3,
|
| 256 |
+
"research": 3,
|
| 257 |
+
"mafia": 3,
|
| 258 |
+
"defense": 3,
|
| 259 |
+
"natural disaster": 3,
|
| 260 |
+
"toys": 3,
|
| 261 |
+
"restaurant": 3,
|
| 262 |
+
"competition": 3,
|
| 263 |
+
"conservation": 3,
|
| 264 |
+
"zoology": 3,
|
| 265 |
+
"k-pop": 3,
|
| 266 |
+
"afl": 3,
|
| 267 |
+
"census": 3,
|
| 268 |
+
"mining": 3,
|
| 269 |
+
"nba": 3,
|
| 270 |
+
"paranormal": 3,
|
| 271 |
+
"naval": 3,
|
| 272 |
+
"consumer goods": 3,
|
| 273 |
+
"police": 3,
|
| 274 |
+
"music industry": 3,
|
| 275 |
+
"news": 3,
|
| 276 |
+
"cigars": 3,
|
| 277 |
+
"fast food": 2,
|
| 278 |
+
"urban planning": 2,
|
| 279 |
+
"insurance": 2,
|
| 280 |
+
"animals": 2,
|
| 281 |
+
"pharmacy": 2,
|
| 282 |
+
"occupation": 2,
|
| 283 |
+
"profession": 2,
|
| 284 |
+
"skateboarding": 2,
|
| 285 |
+
"shipping": 2,
|
| 286 |
+
"beauty pageants": 2,
|
| 287 |
+
"encyclopedia": 2,
|
| 288 |
+
"game theory": 2,
|
| 289 |
+
"oil and gas": 2,
|
| 290 |
+
"electronics": 2,
|
| 291 |
+
"parapsychology": 2,
|
| 292 |
+
"materials": 2,
|
| 293 |
+
"wine": 2,
|
| 294 |
+
"cosmetics": 2,
|
| 295 |
+
"psychiatry": 2,
|
| 296 |
+
"railroads": 2,
|
| 297 |
+
"cryptozoology": 2,
|
| 298 |
+
"cryptography": 2,
|
| 299 |
+
"text analysis": 2,
|
| 300 |
+
"cognitive science": 2,
|
| 301 |
+
"adult film": 2,
|
| 302 |
+
"bakery": 2,
|
| 303 |
+
"prize": 2,
|
| 304 |
+
"roller coasters": 2,
|
| 305 |
+
"non-profit": 2,
|
| 306 |
+
"logistics": 2,
|
| 307 |
+
"reality tv": 2,
|
| 308 |
+
"exploration": 2,
|
| 309 |
+
"professions": 2,
|
| 310 |
+
"australian rules football": 2,
|
| 311 |
+
"humanitarian": 2,
|
| 312 |
+
"heraldry": 2,
|
| 313 |
+
"disaster": 2,
|
| 314 |
+
"air force": 2,
|
| 315 |
+
"cooking": 2,
|
| 316 |
+
"dogs": 2,
|
| 317 |
+
"ethnography": 2,
|
| 318 |
+
"comic books": 2,
|
| 319 |
+
"gender": 2,
|
| 320 |
+
"natural disasters": 2,
|
| 321 |
+
"fantasy": 2,
|
| 322 |
+
"animal welfare": 2,
|
| 323 |
+
"feminism": 2,
|
| 324 |
+
"environmental protection": 2,
|
| 325 |
+
"nascar": 2,
|
| 326 |
+
"universities": 2,
|
| 327 |
+
"video_games": 2,
|
| 328 |
+
"economy": 2,
|
| 329 |
+
"occult": 2,
|
| 330 |
+
"libraries": 2,
|
| 331 |
+
"social science": 2,
|
| 332 |
+
"organisation": 2,
|
| 333 |
+
"records": 2,
|
| 334 |
+
"space technology": 2,
|
| 335 |
+
"development": 2,
|
| 336 |
+
"public health": 2,
|
| 337 |
+
"lifestyle": 2,
|
| 338 |
+
"writing systems": 2,
|
| 339 |
+
"languages": 2,
|
| 340 |
+
"european union": 2,
|
| 341 |
+
"work": 1,
|
| 342 |
+
"writing": 1,
|
| 343 |
+
"climate": 1,
|
| 344 |
+
"honors": 1,
|
| 345 |
+
"archery": 1,
|
| 346 |
+
"rail": 1,
|
| 347 |
+
"cinema": 1,
|
| 348 |
+
"space science": 1,
|
| 349 |
+
"philology": 1,
|
| 350 |
+
"retailing": 1,
|
| 351 |
+
"commerce": 1,
|
| 352 |
+
"nhl": 1,
|
| 353 |
+
"microfinance": 1,
|
| 354 |
+
"cookbook": 1,
|
| 355 |
+
"measurement": 1,
|
| 356 |
+
"professional_wrestling": 1,
|
| 357 |
+
"activism": 1,
|
| 358 |
+
"bodybuilding": 1,
|
| 359 |
+
"theme_park": 1,
|
| 360 |
+
"f1": 1,
|
| 361 |
+
"stock car racing": 1,
|
| 362 |
+
"social work": 1,
|
| 363 |
+
"star wars": 1,
|
| 364 |
+
"semiotics": 1,
|
| 365 |
+
"lgbtq": 1,
|
| 366 |
+
"clothing": 1,
|
| 367 |
+
"etymology": 1,
|
| 368 |
+
"sufism": 1,
|
| 369 |
+
"food safety": 1,
|
| 370 |
+
"rail transport": 1,
|
| 371 |
+
"african american studies": 1,
|
| 372 |
+
"propaganda": 1,
|
| 373 |
+
"conspiracy_theory": 1,
|
| 374 |
+
"cars": 1,
|
| 375 |
+
"mechanics": 1,
|
| 376 |
+
"reference": 1,
|
| 377 |
+
"astrology": 1,
|
| 378 |
+
"sailing": 1,
|
| 379 |
+
"travel": 1,
|
| 380 |
+
"videogames": 1,
|
| 381 |
+
"international law": 1,
|
| 382 |
+
"conspiracy": 1,
|
| 383 |
+
"philippines": 1,
|
| 384 |
+
"mechanical engineering": 1,
|
| 385 |
+
"water industry": 1,
|
| 386 |
+
"logic": 1,
|
| 387 |
+
"oil industry": 1,
|
| 388 |
+
"shooting": 1,
|
| 389 |
+
"tourism": 1,
|
| 390 |
+
"circus": 1,
|
| 391 |
+
"apparel": 1,
|
| 392 |
+
"boatbuilding": 1,
|
| 393 |
+
"computer security": 1,
|
| 394 |
+
"bookstores": 1,
|
| 395 |
+
"alcoholic beverages": 1,
|
| 396 |
+
"cocktail": 1,
|
| 397 |
+
"social services": 1,
|
| 398 |
+
"gymnastics": 1,
|
| 399 |
+
"general knowledge": 1,
|
| 400 |
+
"religious studies": 1,
|
| 401 |
+
"cosmology": 1,
|
| 402 |
+
"honours": 1,
|
| 403 |
+
"cannabis": 1,
|
| 404 |
+
"social justice": 1,
|
| 405 |
+
"collegiate athletics": 1,
|
| 406 |
+
"drama": 1,
|
| 407 |
+
"firefighting": 1,
|
| 408 |
+
"human rights": 1,
|
| 409 |
+
"material science": 1,
|
| 410 |
+
"acoustics": 1,
|
| 411 |
+
"pet care": 1,
|
| 412 |
+
"dairy": 1,
|
| 413 |
+
"publications": 1,
|
| 414 |
+
"supply chain management": 1,
|
| 415 |
+
"humanitarian aid": 1,
|
| 416 |
+
"nuclear": 1,
|
| 417 |
+
"security": 1,
|
| 418 |
+
"hip hop": 1,
|
| 419 |
+
"oilfield services": 1,
|
| 420 |
+
"biotechnology": 1,
|
| 421 |
+
"humanities": 1,
|
| 422 |
+
"social development": 1,
|
| 423 |
+
"aquarium": 1,
|
| 424 |
+
"video_game": 1,
|
| 425 |
+
"lgbt": 1,
|
| 426 |
+
"pageants": 1,
|
| 427 |
+
"forensics": 1,
|
| 428 |
+
"badminton": 1,
|
| 429 |
+
"gaelic football": 1,
|
| 430 |
+
"lgbtq+": 1,
|
| 431 |
+
"ice_hockey": 1,
|
| 432 |
+
"pokemon": 1,
|
| 433 |
+
"american_football": 1,
|
| 434 |
+
"packaging": 1,
|
| 435 |
+
"formula1": 1,
|
| 436 |
+
"traditions": 1,
|
| 437 |
+
"plasma science": 1,
|
| 438 |
+
"firearm": 1,
|
| 439 |
+
"political economy": 1,
|
| 440 |
+
"astronaut": 1,
|
| 441 |
+
"korean language": 1,
|
| 442 |
+
"molecular biology": 1,
|
| 443 |
+
"electrical engineering": 1,
|
| 444 |
+
"peacebuilding": 1,
|
| 445 |
+
"sumo": 1,
|
| 446 |
+
"coffee": 1,
|
| 447 |
+
"utility": 1,
|
| 448 |
+
"astronauts": 1,
|
| 449 |
+
"kickboxing": 1,
|
| 450 |
+
"whiskey": 1,
|
| 451 |
+
"ethnology": 1,
|
| 452 |
+
"ethics": 1,
|
| 453 |
+
"cultural heritage": 1,
|
| 454 |
+
"sculpture": 1,
|
| 455 |
+
"watchmaking": 1,
|
| 456 |
+
"motorcycle clubs": 1,
|
| 457 |
+
"video gaming": 1,
|
| 458 |
+
"herbal medicine": 1,
|
| 459 |
+
"kpop": 1,
|
| 460 |
+
"taxation": 1,
|
| 461 |
+
"consulting": 1,
|
| 462 |
+
"earth science": 1,
|
| 463 |
+
"spaceflight": 1,
|
| 464 |
+
"wildlife": 1,
|
| 465 |
+
"dog breeds": 1,
|
| 466 |
+
"buddhism": 1,
|
| 467 |
+
"darts": 1,
|
| 468 |
+
"differential geometry": 1,
|
| 469 |
+
"indigenous studies": 1,
|
| 470 |
+
"boating": 1,
|
| 471 |
+
"snooker": 1,
|
| 472 |
+
"amusements": 1,
|
| 473 |
+
"water": 1,
|
| 474 |
+
"branding": 1,
|
| 475 |
+
"cartoon": 1,
|
| 476 |
+
"regulation": 1,
|
| 477 |
+
"whisky": 1,
|
| 478 |
+
"sports, film": 1,
|
| 479 |
+
"fitness": 1,
|
| 480 |
+
"occupations": 1,
|
| 481 |
+
"nuclear science": 1,
|
| 482 |
+
"wildlife conservation": 1,
|
| 483 |
+
"pharmacology": 1,
|
| 484 |
+
"crafts": 1,
|
| 485 |
+
"metallurgy": 1,
|
| 486 |
+
"gastronomy": 1,
|
| 487 |
+
"physiology": 1,
|
| 488 |
+
"social entrepreneurship": 1,
|
| 489 |
+
"human sexuality": 1,
|
| 490 |
+
"beauty": 1,
|
| 491 |
+
"anarchism": 1,
|
| 492 |
+
"infrastructure": 1,
|
| 493 |
+
"tradition": 1,
|
| 494 |
+
"history, music": 1,
|
| 495 |
+
"civil rights": 1,
|
| 496 |
+
"birding": 1,
|
| 497 |
+
"communication theory": 1,
|
| 498 |
+
"ethnobotany": 1,
|
| 499 |
+
"holidays": 1,
|
| 500 |
+
"college sports": 1,
|
| 501 |
+
"franchising": 1,
|
| 502 |
+
"tabletop games": 1,
|
| 503 |
+
"soap opera": 1,
|
| 504 |
+
"auction": 1,
|
| 505 |
+
"sexuality": 1,
|
| 506 |
+
"author": 1,
|
| 507 |
+
"actors": 1,
|
| 508 |
+
"graphic design": 1,
|
| 509 |
+
"pseudoscience": 1,
|
| 510 |
+
"hindustani music": 1,
|
| 511 |
+
"pottery": 1,
|
| 512 |
+
"behavioral science": 1,
|
| 513 |
+
"grocery": 1,
|
| 514 |
+
"environmental disasters": 1,
|
| 515 |
+
"environmentalism": 1,
|
| 516 |
+
"online gaming": 1,
|
| 517 |
+
"epidemiology": 1,
|
| 518 |
+
"alpine skiing": 1,
|
| 519 |
+
"gardening": 1,
|
| 520 |
+
"paralympics": 1,
|
| 521 |
+
"food and drink": 1,
|
| 522 |
+
"urban legend": 1,
|
| 523 |
+
"online betting": 1,
|
| 524 |
+
"streaming platforms": 1,
|
| 525 |
+
"artificial intelligence": 1,
|
| 526 |
+
"betting": 1,
|
| 527 |
+
"fantasy sports": 1,
|
| 528 |
+
"climate change": 1,
|
| 529 |
+
"climate science": 1,
|
| 530 |
+
"parenting": 1,
|
| 531 |
+
"global health": 1,
|
| 532 |
+
"environmental science and technology": 1,
|
| 533 |
+
"blues": 1,
|
| 534 |
+
"typography": 1,
|
| 535 |
+
"ontology": 1,
|
| 536 |
+
"japanese language": 1,
|
| 537 |
+
"genetics": 1,
|
| 538 |
+
"perfume": 1,
|
| 539 |
+
"international relations": 1,
|
| 540 |
+
"country": 1,
|
| 541 |
+
"publication": 1,
|
| 542 |
+
"public_relations": 1,
|
| 543 |
+
"peace": 1,
|
| 544 |
+
"demography": 1,
|
| 545 |
+
"tax": 1
|
| 546 |
+
}
|
deep_search/data_from_zhiyuan/data_for_rl/jsonl_to_json.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
def jsonl_to_json(jsonl_file, json_file):
    """
    Convert a JSON Lines file into a single JSON array file.

    :param jsonl_file: path of the input JSONL file
    :param json_file: path of the output JSON file
    :return: the list of parsed records (also written to ``json_file``)
    """
    data_list = []

    # Read the JSONL file line by line.
    with open(jsonl_file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            # Robustness fix: skip blank lines (e.g. a trailing empty line),
            # which previously crashed json.loads with an empty string.
            if not line:
                continue
            data_list.append(json.loads(line))

    # Write all records out as one pretty-printed JSON array.
    with open(json_file, 'w', encoding='utf-8') as f:
        json.dump(data_list, f, ensure_ascii=False, indent=4)

    # Returning the records is backward-compatible (callers that ignore the
    # return value are unaffected) and makes the function usable in-process.
    return data_list
|
| 23 |
+
|
| 24 |
+
# 示例调用
|
| 25 |
+
# Example invocation: convert the MuSiQue answerable-train QA subset
# from JSONL to a single JSON array file.
input_file = "/share/project/sunshuang/deep_search/data_for_rl/musique_ans_v1.0_train_only_qa.jsonl"
output_file = "/share/project/sunshuang/deep_search/data_for_rl/musique_ans_v1.0_train_only_qa.json"
jsonl_to_json(input_file, output_file)
|
deep_search/data_from_zhiyuan/data_for_rl/merge_data.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
def merge_json_files(file_paths, sources, output_file):
    """
    Merge several JSON array files into one, tagging every record with a
    ``source`` field naming the file it came from.

    :param file_paths: input JSON file paths (each holding a list of dicts)
    :param sources: source label for each file, parallel to ``file_paths``
    :param output_file: path of the merged JSON file to write
    :raises ValueError: if the two input lists differ in length
    """
    # The two lists must be parallel; refuse mismatched inputs up front.
    if len(file_paths) != len(sources):
        raise ValueError("file_paths 和 sources 的长度必须相同")

    merged_data = []

    # Load each file, stamp its records with the source label, and collect.
    for file_path, source in zip(file_paths, sources):
        with open(file_path, 'r', encoding='utf-8') as f:
            records = json.load(f)
        for record in records:
            record['source'] = source
            merged_data.append(record)

    # Persist the combined dataset.
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(merged_data, f, ensure_ascii=False, indent=4)
|
| 29 |
+
|
| 30 |
+
# 示例调用
|
| 31 |
+
# Example invocation: merge the four multi-hop QA training sets into one
# file, tagging each record with the dataset it came from.
file_paths = [
    '/share/project/sunshuang/deep_search/data_for_rl/2wiki_train_only_compositional_qa.json',
    '/share/project/sunshuang/deep_search/data_for_rl/hotpot_train_v1.1_only_qa.json',
    '/share/project/sunshuang/deep_search/data_for_rl/MultiHopRAG_only_qa.json',
    '/share/project/sunshuang/deep_search/data_for_rl/musique_ans_v1.0_train_only_qa.json'
]

# Source labels, parallel to file_paths (order matters).
sources = [
    '2wiki_compositional',
    'hotpot',
    'MultiHopRAG',
    'musique'
]

output_file = '/share/project/sunshuang/deep_search/data_for_rl/merged_data.json'

merge_json_files(file_paths, sources, output_file)
|
deep_search/data_from_zhiyuan/data_for_rl/musique_ans_v1.0_train_only_qa.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/data_from_zhiyuan/data_for_rl/musique_ans_v1.0_train_only_qa.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/domain_counts.json
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"film": 500,
|
| 3 |
+
"television": 500,
|
| 4 |
+
"music": 500,
|
| 5 |
+
"history": 500,
|
| 6 |
+
"geography": 500,
|
| 7 |
+
"politics": 500,
|
| 8 |
+
"sports": 500,
|
| 9 |
+
"other": 500
|
| 10 |
+
}
|
deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/domain_keypoints_distribution_without_remove_dup.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/key_points_count.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/data_from_zhiyuan/data_for_rl/musique_tagged/source_counts.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"musique": 4000
|
| 3 |
+
}
|
deep_search/data_from_zhiyuan/data_for_rl/select_source.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
def read_json(file_path):
    """Deserialize a JSON file, report its item count, and return the result."""
    with open(file_path, "r", encoding="utf-8") as src:
        content = json.load(src)
    print(f"load from {file_path}, total item: {len(content)}")
    return content
|
| 9 |
+
|
| 10 |
+
def read_jsonl(file_path):
    """Read a JSON Lines file and return its records as a Python list."""
    with open(file_path, "r", encoding="utf-8") as src:
        items = [json.loads(entry.strip()) for entry in src]
    print(f"load from {file_path}, total item: {len(items)}")
    return items
|
| 18 |
+
|
| 19 |
+
def write_json(data, file_path):
    """Dump *data* to *file_path* as indented JSON, keeping non-ASCII text."""
    with open(file_path, "w", encoding="utf-8") as dst:
        json.dump(data, dst, ensure_ascii=False, indent=4)
    print(f"write to {file_path}, total item: {len(data)}")
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Filter the merged RL dataset down to the records that came from MuSiQue.
input_file = "/share/project/sunshuang/deep_search/data_for_rl/tagged_domain_keypoints/merged_data_tagged_domain_keypoints_keywords_count.json"
output_file = "/share/project/sunshuang/deep_search/data_for_rl/musique_tagged/musique_tagged_domain_keypoints_keywords_count.json"

data = read_json(input_file)

# Keep only items whose source tag is "musique".
selected_data = [item for item in data if item["source"] == "musique"]

write_json(selected_data, output_file)
|
deep_search/data_from_zhiyuan/data_for_rl/source_counts.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"2wiki_compositional": 80860,
|
| 3 |
+
"hotpot": 72991,
|
| 4 |
+
"MultiHopRAG": 2556,
|
| 5 |
+
"musique": 19938
|
| 6 |
+
}
|
deep_search/data_from_zhiyuan/data_for_rl/tagged_domain_keypoints/domain_counts.json
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architecture": 206,
|
| 3 |
+
"technology": 206,
|
| 4 |
+
"education": 206,
|
| 5 |
+
"baseball": 206,
|
| 6 |
+
"film": 206,
|
| 7 |
+
"literature": 205,
|
| 8 |
+
"physics": 205,
|
| 9 |
+
"crime": 205,
|
| 10 |
+
"aviation": 205,
|
| 11 |
+
"genealogy": 205,
|
| 12 |
+
"publishing": 205,
|
| 13 |
+
"theater": 205,
|
| 14 |
+
"history": 205,
|
| 15 |
+
"politics": 205,
|
| 16 |
+
"motorsport": 205,
|
| 17 |
+
"music": 205,
|
| 18 |
+
"television": 205,
|
| 19 |
+
"sports": 205,
|
| 20 |
+
"gaming": 205,
|
| 21 |
+
"theatre": 205,
|
| 22 |
+
"art": 205,
|
| 23 |
+
"military": 205,
|
| 24 |
+
"comics": 205,
|
| 25 |
+
"entertainment": 205,
|
| 26 |
+
"business": 205,
|
| 27 |
+
"family": 205,
|
| 28 |
+
"biology": 205,
|
| 29 |
+
"people": 205,
|
| 30 |
+
"government": 205,
|
| 31 |
+
"transportation": 205,
|
| 32 |
+
"media": 205,
|
| 33 |
+
"football": 205,
|
| 34 |
+
"religion": 205,
|
| 35 |
+
"biography": 205,
|
| 36 |
+
"royalty": 205,
|
| 37 |
+
"law": 205,
|
| 38 |
+
"automotive": 205,
|
| 39 |
+
"geography": 205,
|
| 40 |
+
"other": 205
|
| 41 |
+
}
|
deep_search/data_from_zhiyuan/data_for_rl/tagged_domain_keypoints/domain_keypoints_distribution_without_remove_dup.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/data_from_zhiyuan/data_for_rl/tagged_domain_keypoints/source_counts.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"hotpot": 6473,
|
| 3 |
+
"musique": 569,
|
| 4 |
+
"MultiHopRAG": 214,
|
| 5 |
+
"2wiki_compositional": 744
|
| 6 |
+
}
|
deep_search/sft/3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B_2.log
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [168, 143, 115, 86]
|
| 3 |
+
step_list: [168, 143, 115, 86]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-168) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/eval
|
| 7 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/realqa
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval
|
| 11 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/realqa
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval/inf.log 2>&1 &
|
| 19 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa/inf.log 2>&1 &
|
| 20 |
+
All checkpoints exist. Wait for runing...
|
| 21 |
+
available_gpus: [0, 4, 5, 6, 7]
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=5,6 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa/inf.log 2>&1 &
|
| 26 |
+
available_gpus: [0, 4, 7]
|
| 27 |
+
The following command is about to run:
|
| 28 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,4 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval/inf.log 2>&1 &
|
| 29 |
+
available_gpus: [4, 7]
|
| 30 |
+
The following command is about to run:
|
| 31 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa/inf.log 2>&1 &
|
| 32 |
+
Wish me good luck!
|
deep_search/sft/3-28_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829_doc_by_itself_QwQ-32B_3.log
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [168, 143, 115, 86]
|
| 3 |
+
step_list: [168, 143, 115, 86]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-168) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/eval
|
| 7 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/168/realqa
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-143) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/eval
|
| 11 |
+
skip evaluated model: JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/143/realqa
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval/inf.log 2>&1 &
|
| 19 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa/inf.log 2>&1 &
|
| 20 |
+
All checkpoints exist. Wait for runing...
|
| 21 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/eval/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-115 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/115/realqa/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/eval/inf.log 2>&1 &
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:9446#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_merged_1174_syn_long_359_sft_1533_zh_296_sft_1829/86/realqa/inf.log 2>&1 &
|
| 30 |
+
Wish me good luck!
|
deep_search/sft/4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B_2.log
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [102, 86, 69, 51]
|
| 3 |
+
step_list: [102, 86, 69, 51]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/gaia/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/gaia/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/gaia/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/gaia/inf.log 2>&1 &
|
| 16 |
+
All checkpoints exist. Wait for runing...
|
| 17 |
+
available_gpus: [0, 1, 2, 3, 6, 7]
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/gaia/inf.log 2>&1 &
|
| 20 |
+
The following command is about to run:
|
| 21 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/gaia/inf.log 2>&1 &
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/gaia/inf.log 2>&1 &
|
| 24 |
+
available_gpus: [0, 1, 2, 3, 6, 7]
|
| 25 |
+
The following command is about to run:
|
| 26 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/gaia/inf.log 2>&1 &
|
| 27 |
+
Wish me good luck!
|
deep_search/sft/4-1_1.1k_cleaned_data_1097_doc_by_itself_QwQ-32B_3.log
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [102, 86, 69, 51]
|
| 3 |
+
step_list: [102, 86, 69, 51]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia_level3 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/gaia_level3 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/gaia_level3/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia_level3 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/gaia_level3 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/gaia_level3/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia_level3 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/gaia_level3 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/gaia_level3/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia_level3 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/gaia_level3 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/gaia_level3/inf.log 2>&1 &
|
| 16 |
+
All checkpoints exist. Wait for runing...
|
| 17 |
+
available_gpus: [1, 3, 6, 7]
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=1,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia_level3 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/gaia_level3 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-102 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/102/gaia_level3/inf.log 2>&1 &
|
| 20 |
+
The following command is about to run:
|
| 21 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia_level3 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/gaia_level3 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-86 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/86/gaia_level3/inf.log 2>&1 &
|
| 22 |
+
available_gpus: []
|
| 23 |
+
available_gpus: []
|
| 24 |
+
available_gpus: []
|
| 25 |
+
available_gpus: []
|
| 26 |
+
available_gpus: []
|
| 27 |
+
available_gpus: []
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: [6, 7]
|
| 35 |
+
The following command is about to run:
|
| 36 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia_level3 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/gaia_level3 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-69 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/69/gaia_level3/inf.log 2>&1 &
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: [1, 3]
|
| 39 |
+
The following command is about to run:
|
| 40 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=1,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name gaia_level3 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/gaia_level3 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/checkpoint-51 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17755#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_1.1k_cleaned_data_1097/51/gaia_level3/inf.log 2>&1 &
|
| 41 |
+
Wish me good luck!
|
deep_search/sft/4-1_1.1k_cleaned_data_871_doc_by_itself_QwQ-32B_2.log
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41]
|
| 3 |
+
step_list: ['55']
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500/inf.log 2>&1 &
|
| 7 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/realqa/inf.log 2>&1 &
|
| 8 |
+
All checkpoints exist. Wait for runing...
|
| 9 |
+
available_gpus: [0, 1, 6, 7]
|
| 10 |
+
The following command is about to run:
|
| 11 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500/inf.log 2>&1 &
|
| 12 |
+
The following command is about to run:
|
| 13 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name realqa --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/realqa --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:2284#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:2#ACC:4_no_error_data_871/55/realqa/inf.log 2>&1 &
|
| 14 |
+
Wish me good luck!
|
deep_search/sft/4-24__doc_by_itself__add_math871_after_search.log
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Traceback (most recent call last):
|
| 2 |
+
File "/opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py", line 176, in <module>
|
| 3 |
+
ckpt = sys.argv[1]
|
| 4 |
+
IndexError: list index out of range
|
deep_search/sft/4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_Qwen2.5-7B-Instruct_first_math.log
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [78, 68, 55, 41, 27, 13]
|
| 2 |
+
step_list: [78, 68, 55, 41, 27, 13]
|
| 3 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime/inf.log 2>&1 &
|
| 9 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55) to exist...
|
| 10 |
+
The checkpoint exists. Waiting for running...
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime/inf.log 2>&1 &
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/aime/inf.log 2>&1 &
|
| 15 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-27) to exist...
|
| 16 |
+
The checkpoint exists. Waiting for running...
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/27/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/27/aime/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-13) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/13/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/13/aime/inf.log 2>&1 &
|
| 21 |
+
All checkpoints exist. Wait for runing...
|
| 22 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime/inf.log 2>&1 &
|
| 25 |
+
The following command is about to run:
|
| 26 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime/inf.log 2>&1 &
|
| 27 |
+
The following command is about to run:
|
| 28 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime/inf.log 2>&1 &
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 37 |
+
The following command is about to run:
|
| 38 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/aime/inf.log 2>&1 &
|
| 39 |
+
The following command is about to run:
|
| 40 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/27/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/27/aime/inf.log 2>&1 &
|
| 41 |
+
The following command is about to run:
|
| 42 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/13/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-13 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:30936#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/13/aime/inf.log 2>&1 &
|
| 43 |
+
Wish me good luck!
|
deep_search/sft/4-24_math_qwq_4524_selected_add_prompt_871_doc_by_itself_qwen7b_sft_871_checkpoint-78_add_math871_after_search_2.log
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 55, 41]
|
| 3 |
+
step_list: [78, 68, 55, 41]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
skip evaluated model: JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/78/aime
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime/inf.log 2>&1 &
|
| 13 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-41) to exist...
|
| 14 |
+
The checkpoint exists. Waiting for running...
|
| 15 |
+
skip evaluated model: JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/41/aime
|
| 16 |
+
All checkpoints exist. Wait for runing...
|
| 17 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 18 |
+
The following command is about to run:
|
| 19 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/68/aime/inf.log 2>&1 &
|
| 20 |
+
The following command is about to run:
|
| 21 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16641#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/55/aime/inf.log 2>&1 &
|
| 22 |
+
Wish me good luck!
|
deep_search/sft/4-24_no_error_data_871_doc_by_itself_QwQ-32B.log
ADDED
|
File without changes
|
deep_search/sft/4-24_no_error_data_871_doc_by_itself_Qwen2.5-7B-Instruct_add_math871.log
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [162, 136, 109, 81]
|
| 3 |
+
step_list: [162, 136, 109, 81]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-162) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-162 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/eval/inf.log 2>&1 &
|
| 7 |
+
skip evaluated model: JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/aime
|
| 8 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-136) to exist...
|
| 9 |
+
The checkpoint exists. Waiting for running...
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-136 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/eval/inf.log 2>&1 &
|
| 11 |
+
skip evaluated model: JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/aime
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-109) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-109 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/eval/inf.log 2>&1 &
|
| 15 |
+
skip evaluated model: JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/aime
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-81) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/eval/inf.log 2>&1 &
|
| 19 |
+
skip evaluated model: JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/aime
|
| 20 |
+
All checkpoints exist. Wait for runing...
|
| 21 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 22 |
+
The following command is about to run:
|
| 23 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-162 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/162/eval/inf.log 2>&1 &
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-136 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/136/eval/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-109 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/109/eval/inf.log 2>&1 &
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: []
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
| 115 |
+
available_gpus: []
|
| 116 |
+
available_gpus: []
|
| 117 |
+
available_gpus: []
|
| 118 |
+
available_gpus: []
|
| 119 |
+
available_gpus: []
|
| 120 |
+
available_gpus: []
|
| 121 |
+
available_gpus: []
|
| 122 |
+
available_gpus: []
|
| 123 |
+
available_gpus: []
|
| 124 |
+
available_gpus: []
|
| 125 |
+
available_gpus: []
|
| 126 |
+
available_gpus: []
|
| 127 |
+
available_gpus: []
|
| 128 |
+
available_gpus: []
|
| 129 |
+
available_gpus: []
|
| 130 |
+
available_gpus: []
|
| 131 |
+
available_gpus: []
|
| 132 |
+
available_gpus: []
|
| 133 |
+
available_gpus: []
|
| 134 |
+
available_gpus: []
|
| 135 |
+
available_gpus: []
|
| 136 |
+
available_gpus: []
|
| 137 |
+
available_gpus: []
|
| 138 |
+
available_gpus: []
|
| 139 |
+
available_gpus: []
|
| 140 |
+
available_gpus: []
|
| 141 |
+
available_gpus: []
|
| 142 |
+
available_gpus: []
|
| 143 |
+
available_gpus: []
|
| 144 |
+
available_gpus: []
|
| 145 |
+
available_gpus: []
|
| 146 |
+
available_gpus: []
|
| 147 |
+
available_gpus: []
|
| 148 |
+
available_gpus: []
|
| 149 |
+
available_gpus: []
|
| 150 |
+
available_gpus: []
|
| 151 |
+
available_gpus: []
|
| 152 |
+
available_gpus: []
|
| 153 |
+
available_gpus: []
|
| 154 |
+
available_gpus: []
|
| 155 |
+
available_gpus: []
|
| 156 |
+
available_gpus: [4, 5]
|
| 157 |
+
The following command is about to run:
|
| 158 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-81 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:11361#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_no_error_data_871/81/eval/inf.log 2>&1 &
|
| 159 |
+
Wish me good luck!
|
deep_search/sft/4-25_no_error_data_871_doc_by_itself_QwQ-32B.log
ADDED
|
@@ -0,0 +1,488 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nohup: ignoring input
|
| 2 |
+
step_list: [78, 68, 54, 40, 27, 13]
|
| 3 |
+
step_list: [78, 68, 54]
|
| 4 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-78) to exist...
|
| 5 |
+
The checkpoint exists. Waiting for running...
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/eval/inf.log 2>&1 &
|
| 7 |
+
skip evaluated model: JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/gaia
|
| 8 |
+
skip evaluated model: JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/aime
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/frames/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/eval/inf.log 2>&1 &
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/gaia/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/aime/inf.log 2>&1 &
|
| 15 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/frames/inf.log 2>&1 &
|
| 16 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54) to exist...
|
| 17 |
+
The checkpoint exists. Waiting for running...
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/eval/inf.log 2>&1 &
|
| 19 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/gaia/inf.log 2>&1 &
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/aime/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/frames/inf.log 2>&1 &
|
| 22 |
+
All checkpoints exist. Wait for runing...
|
| 23 |
+
available_gpus: [4, 5, 6, 7]
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/eval/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/78/frames/inf.log 2>&1 &
|
| 28 |
+
available_gpus: []
|
| 29 |
+
available_gpus: []
|
| 30 |
+
available_gpus: []
|
| 31 |
+
available_gpus: []
|
| 32 |
+
available_gpus: []
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: []
|
| 57 |
+
available_gpus: []
|
| 58 |
+
available_gpus: []
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: []
|
| 85 |
+
available_gpus: []
|
| 86 |
+
available_gpus: []
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: []
|
| 89 |
+
available_gpus: []
|
| 90 |
+
available_gpus: []
|
| 91 |
+
available_gpus: []
|
| 92 |
+
available_gpus: []
|
| 93 |
+
available_gpus: []
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: []
|
| 98 |
+
available_gpus: []
|
| 99 |
+
available_gpus: []
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
| 115 |
+
available_gpus: []
|
| 116 |
+
available_gpus: []
|
| 117 |
+
available_gpus: []
|
| 118 |
+
available_gpus: []
|
| 119 |
+
available_gpus: []
|
| 120 |
+
available_gpus: []
|
| 121 |
+
available_gpus: []
|
| 122 |
+
available_gpus: []
|
| 123 |
+
available_gpus: []
|
| 124 |
+
available_gpus: []
|
| 125 |
+
available_gpus: []
|
| 126 |
+
available_gpus: []
|
| 127 |
+
available_gpus: []
|
| 128 |
+
available_gpus: []
|
| 129 |
+
available_gpus: []
|
| 130 |
+
available_gpus: []
|
| 131 |
+
available_gpus: []
|
| 132 |
+
available_gpus: []
|
| 133 |
+
available_gpus: []
|
| 134 |
+
available_gpus: []
|
| 135 |
+
available_gpus: []
|
| 136 |
+
available_gpus: []
|
| 137 |
+
available_gpus: []
|
| 138 |
+
available_gpus: []
|
| 139 |
+
available_gpus: []
|
| 140 |
+
available_gpus: []
|
| 141 |
+
available_gpus: []
|
| 142 |
+
available_gpus: []
|
| 143 |
+
available_gpus: []
|
| 144 |
+
available_gpus: []
|
| 145 |
+
available_gpus: []
|
| 146 |
+
available_gpus: []
|
| 147 |
+
available_gpus: []
|
| 148 |
+
available_gpus: []
|
| 149 |
+
available_gpus: []
|
| 150 |
+
available_gpus: []
|
| 151 |
+
available_gpus: []
|
| 152 |
+
available_gpus: []
|
| 153 |
+
available_gpus: []
|
| 154 |
+
available_gpus: []
|
| 155 |
+
available_gpus: []
|
| 156 |
+
available_gpus: []
|
| 157 |
+
available_gpus: []
|
| 158 |
+
available_gpus: []
|
| 159 |
+
available_gpus: []
|
| 160 |
+
available_gpus: []
|
| 161 |
+
available_gpus: []
|
| 162 |
+
available_gpus: []
|
| 163 |
+
available_gpus: []
|
| 164 |
+
available_gpus: []
|
| 165 |
+
available_gpus: []
|
| 166 |
+
available_gpus: []
|
| 167 |
+
available_gpus: []
|
| 168 |
+
available_gpus: []
|
| 169 |
+
available_gpus: []
|
| 170 |
+
available_gpus: []
|
| 171 |
+
available_gpus: []
|
| 172 |
+
available_gpus: []
|
| 173 |
+
available_gpus: []
|
| 174 |
+
available_gpus: []
|
| 175 |
+
available_gpus: []
|
| 176 |
+
available_gpus: []
|
| 177 |
+
available_gpus: []
|
| 178 |
+
available_gpus: []
|
| 179 |
+
available_gpus: []
|
| 180 |
+
available_gpus: []
|
| 181 |
+
available_gpus: []
|
| 182 |
+
available_gpus: []
|
| 183 |
+
available_gpus: []
|
| 184 |
+
available_gpus: []
|
| 185 |
+
available_gpus: []
|
| 186 |
+
available_gpus: []
|
| 187 |
+
available_gpus: []
|
| 188 |
+
available_gpus: []
|
| 189 |
+
available_gpus: []
|
| 190 |
+
available_gpus: []
|
| 191 |
+
available_gpus: []
|
| 192 |
+
available_gpus: []
|
| 193 |
+
available_gpus: []
|
| 194 |
+
available_gpus: []
|
| 195 |
+
available_gpus: []
|
| 196 |
+
available_gpus: []
|
| 197 |
+
available_gpus: []
|
| 198 |
+
available_gpus: []
|
| 199 |
+
available_gpus: []
|
| 200 |
+
available_gpus: []
|
| 201 |
+
available_gpus: []
|
| 202 |
+
available_gpus: []
|
| 203 |
+
available_gpus: []
|
| 204 |
+
available_gpus: []
|
| 205 |
+
available_gpus: []
|
| 206 |
+
available_gpus: []
|
| 207 |
+
available_gpus: []
|
| 208 |
+
available_gpus: []
|
| 209 |
+
available_gpus: [4, 5]
|
| 210 |
+
The following command is about to run:
|
| 211 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/eval/inf.log 2>&1 &
|
| 212 |
+
available_gpus: [4, 5]
|
| 213 |
+
The following command is about to run:
|
| 214 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/gaia/inf.log 2>&1 &
|
| 215 |
+
available_gpus: []
|
| 216 |
+
available_gpus: []
|
| 217 |
+
available_gpus: []
|
| 218 |
+
available_gpus: []
|
| 219 |
+
available_gpus: []
|
| 220 |
+
available_gpus: []
|
| 221 |
+
available_gpus: []
|
| 222 |
+
available_gpus: []
|
| 223 |
+
available_gpus: []
|
| 224 |
+
available_gpus: []
|
| 225 |
+
available_gpus: []
|
| 226 |
+
available_gpus: []
|
| 227 |
+
available_gpus: []
|
| 228 |
+
available_gpus: []
|
| 229 |
+
available_gpus: []
|
| 230 |
+
available_gpus: []
|
| 231 |
+
available_gpus: []
|
| 232 |
+
available_gpus: []
|
| 233 |
+
available_gpus: []
|
| 234 |
+
available_gpus: []
|
| 235 |
+
available_gpus: []
|
| 236 |
+
available_gpus: []
|
| 237 |
+
available_gpus: []
|
| 238 |
+
available_gpus: []
|
| 239 |
+
available_gpus: []
|
| 240 |
+
available_gpus: []
|
| 241 |
+
available_gpus: []
|
| 242 |
+
available_gpus: []
|
| 243 |
+
available_gpus: []
|
| 244 |
+
available_gpus: []
|
| 245 |
+
available_gpus: []
|
| 246 |
+
available_gpus: []
|
| 247 |
+
available_gpus: []
|
| 248 |
+
available_gpus: []
|
| 249 |
+
available_gpus: []
|
| 250 |
+
available_gpus: []
|
| 251 |
+
available_gpus: []
|
| 252 |
+
available_gpus: []
|
| 253 |
+
available_gpus: []
|
| 254 |
+
available_gpus: []
|
| 255 |
+
available_gpus: []
|
| 256 |
+
available_gpus: []
|
| 257 |
+
available_gpus: []
|
| 258 |
+
available_gpus: []
|
| 259 |
+
available_gpus: []
|
| 260 |
+
available_gpus: []
|
| 261 |
+
available_gpus: []
|
| 262 |
+
available_gpus: []
|
| 263 |
+
available_gpus: []
|
| 264 |
+
available_gpus: []
|
| 265 |
+
available_gpus: []
|
| 266 |
+
available_gpus: []
|
| 267 |
+
available_gpus: []
|
| 268 |
+
available_gpus: []
|
| 269 |
+
available_gpus: []
|
| 270 |
+
available_gpus: []
|
| 271 |
+
available_gpus: []
|
| 272 |
+
available_gpus: []
|
| 273 |
+
available_gpus: []
|
| 274 |
+
available_gpus: []
|
| 275 |
+
available_gpus: []
|
| 276 |
+
available_gpus: []
|
| 277 |
+
available_gpus: [6, 7]
|
| 278 |
+
The following command is about to run:
|
| 279 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/aime/inf.log 2>&1 &
|
| 280 |
+
available_gpus: [6, 7]
|
| 281 |
+
The following command is about to run:
|
| 282 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/68/frames/inf.log 2>&1 &
|
| 283 |
+
available_gpus: []
|
| 284 |
+
available_gpus: [6]
|
| 285 |
+
available_gpus: [6]
|
| 286 |
+
available_gpus: [6]
|
| 287 |
+
available_gpus: [6]
|
| 288 |
+
available_gpus: [6]
|
| 289 |
+
available_gpus: [6]
|
| 290 |
+
available_gpus: [6]
|
| 291 |
+
available_gpus: [6]
|
| 292 |
+
available_gpus: [6]
|
| 293 |
+
available_gpus: [6]
|
| 294 |
+
available_gpus: [6]
|
| 295 |
+
available_gpus: [6]
|
| 296 |
+
available_gpus: [6]
|
| 297 |
+
available_gpus: [6]
|
| 298 |
+
available_gpus: [6]
|
| 299 |
+
available_gpus: [6]
|
| 300 |
+
available_gpus: [6]
|
| 301 |
+
available_gpus: [6]
|
| 302 |
+
available_gpus: [6]
|
| 303 |
+
available_gpus: [6]
|
| 304 |
+
available_gpus: [6]
|
| 305 |
+
available_gpus: [6]
|
| 306 |
+
available_gpus: [6]
|
| 307 |
+
available_gpus: [6]
|
| 308 |
+
available_gpus: [6]
|
| 309 |
+
available_gpus: [6]
|
| 310 |
+
available_gpus: [6]
|
| 311 |
+
available_gpus: [6]
|
| 312 |
+
available_gpus: [6]
|
| 313 |
+
available_gpus: [6]
|
| 314 |
+
available_gpus: [6]
|
| 315 |
+
available_gpus: [6]
|
| 316 |
+
available_gpus: [6]
|
| 317 |
+
available_gpus: [6]
|
| 318 |
+
available_gpus: [6]
|
| 319 |
+
available_gpus: [6]
|
| 320 |
+
available_gpus: [6]
|
| 321 |
+
available_gpus: [6]
|
| 322 |
+
available_gpus: [6]
|
| 323 |
+
available_gpus: [6]
|
| 324 |
+
available_gpus: [6]
|
| 325 |
+
available_gpus: [6]
|
| 326 |
+
available_gpus: [6]
|
| 327 |
+
available_gpus: [6]
|
| 328 |
+
available_gpus: [6]
|
| 329 |
+
available_gpus: [6]
|
| 330 |
+
available_gpus: [6]
|
| 331 |
+
available_gpus: [6]
|
| 332 |
+
available_gpus: [6]
|
| 333 |
+
available_gpus: [6]
|
| 334 |
+
available_gpus: [6]
|
| 335 |
+
available_gpus: [6]
|
| 336 |
+
available_gpus: [6]
|
| 337 |
+
available_gpus: [6]
|
| 338 |
+
available_gpus: [6]
|
| 339 |
+
available_gpus: [6]
|
| 340 |
+
available_gpus: [6]
|
| 341 |
+
available_gpus: [6]
|
| 342 |
+
available_gpus: [6]
|
| 343 |
+
available_gpus: [6]
|
| 344 |
+
available_gpus: [6]
|
| 345 |
+
available_gpus: [6]
|
| 346 |
+
available_gpus: [6]
|
| 347 |
+
available_gpus: [6]
|
| 348 |
+
available_gpus: [6]
|
| 349 |
+
available_gpus: [4, 5, 6]
|
| 350 |
+
The following command is about to run:
|
| 351 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/eval/inf.log 2>&1 &
|
| 352 |
+
available_gpus: [6]
|
| 353 |
+
available_gpus: [6]
|
| 354 |
+
available_gpus: [6]
|
| 355 |
+
available_gpus: [6]
|
| 356 |
+
available_gpus: [6]
|
| 357 |
+
available_gpus: [6]
|
| 358 |
+
available_gpus: [6]
|
| 359 |
+
available_gpus: [6]
|
| 360 |
+
available_gpus: [6]
|
| 361 |
+
available_gpus: [6]
|
| 362 |
+
available_gpus: [6]
|
| 363 |
+
available_gpus: [6]
|
| 364 |
+
available_gpus: [6]
|
| 365 |
+
available_gpus: [6]
|
| 366 |
+
available_gpus: [6]
|
| 367 |
+
available_gpus: [6]
|
| 368 |
+
available_gpus: [6]
|
| 369 |
+
available_gpus: [6]
|
| 370 |
+
available_gpus: [6]
|
| 371 |
+
available_gpus: [6]
|
| 372 |
+
available_gpus: [6]
|
| 373 |
+
available_gpus: [6]
|
| 374 |
+
available_gpus: [6]
|
| 375 |
+
available_gpus: [6]
|
| 376 |
+
available_gpus: [6]
|
| 377 |
+
available_gpus: [6]
|
| 378 |
+
available_gpus: [6]
|
| 379 |
+
available_gpus: [6]
|
| 380 |
+
available_gpus: [6]
|
| 381 |
+
available_gpus: [6]
|
| 382 |
+
available_gpus: [6]
|
| 383 |
+
available_gpus: [6]
|
| 384 |
+
available_gpus: [6]
|
| 385 |
+
available_gpus: [6]
|
| 386 |
+
available_gpus: [6]
|
| 387 |
+
available_gpus: [6]
|
| 388 |
+
available_gpus: [6]
|
| 389 |
+
available_gpus: [6]
|
| 390 |
+
available_gpus: [6]
|
| 391 |
+
available_gpus: [6]
|
| 392 |
+
available_gpus: [6]
|
| 393 |
+
available_gpus: [6]
|
| 394 |
+
available_gpus: [6]
|
| 395 |
+
available_gpus: [6]
|
| 396 |
+
available_gpus: [6]
|
| 397 |
+
available_gpus: [6]
|
| 398 |
+
available_gpus: [6]
|
| 399 |
+
available_gpus: [6]
|
| 400 |
+
available_gpus: [6]
|
| 401 |
+
available_gpus: [6]
|
| 402 |
+
available_gpus: [6]
|
| 403 |
+
available_gpus: [6]
|
| 404 |
+
available_gpus: [6]
|
| 405 |
+
available_gpus: [6]
|
| 406 |
+
available_gpus: [6]
|
| 407 |
+
available_gpus: [6]
|
| 408 |
+
available_gpus: [6]
|
| 409 |
+
available_gpus: [6]
|
| 410 |
+
available_gpus: [6]
|
| 411 |
+
available_gpus: [6]
|
| 412 |
+
available_gpus: [6]
|
| 413 |
+
available_gpus: [6]
|
| 414 |
+
available_gpus: [6]
|
| 415 |
+
available_gpus: [6]
|
| 416 |
+
available_gpus: [6]
|
| 417 |
+
available_gpus: [6]
|
| 418 |
+
available_gpus: [6]
|
| 419 |
+
available_gpus: [6]
|
| 420 |
+
available_gpus: [6]
|
| 421 |
+
available_gpus: [6]
|
| 422 |
+
available_gpus: [6]
|
| 423 |
+
available_gpus: [6]
|
| 424 |
+
available_gpus: [6]
|
| 425 |
+
available_gpus: [6]
|
| 426 |
+
available_gpus: [6]
|
| 427 |
+
available_gpus: [6]
|
| 428 |
+
available_gpus: [6]
|
| 429 |
+
available_gpus: [6]
|
| 430 |
+
available_gpus: [6]
|
| 431 |
+
available_gpus: [6]
|
| 432 |
+
available_gpus: [6]
|
| 433 |
+
available_gpus: [6]
|
| 434 |
+
available_gpus: [6]
|
| 435 |
+
available_gpus: [6]
|
| 436 |
+
available_gpus: [6]
|
| 437 |
+
available_gpus: [6]
|
| 438 |
+
available_gpus: [6]
|
| 439 |
+
available_gpus: [6]
|
| 440 |
+
available_gpus: [6]
|
| 441 |
+
available_gpus: [6]
|
| 442 |
+
available_gpus: [6]
|
| 443 |
+
available_gpus: [6]
|
| 444 |
+
available_gpus: [6]
|
| 445 |
+
available_gpus: [6]
|
| 446 |
+
available_gpus: [6]
|
| 447 |
+
available_gpus: [6]
|
| 448 |
+
available_gpus: [6]
|
| 449 |
+
available_gpus: [6]
|
| 450 |
+
available_gpus: [6]
|
| 451 |
+
available_gpus: [6]
|
| 452 |
+
available_gpus: [6]
|
| 453 |
+
available_gpus: [6]
|
| 454 |
+
available_gpus: [6]
|
| 455 |
+
available_gpus: [6]
|
| 456 |
+
available_gpus: [6]
|
| 457 |
+
available_gpus: [6]
|
| 458 |
+
available_gpus: [6]
|
| 459 |
+
available_gpus: [6]
|
| 460 |
+
available_gpus: [6]
|
| 461 |
+
available_gpus: [6]
|
| 462 |
+
available_gpus: [6]
|
| 463 |
+
available_gpus: [6]
|
| 464 |
+
available_gpus: [6]
|
| 465 |
+
available_gpus: [6]
|
| 466 |
+
available_gpus: [6]
|
| 467 |
+
available_gpus: [6]
|
| 468 |
+
available_gpus: [6]
|
| 469 |
+
available_gpus: [6]
|
| 470 |
+
available_gpus: [6]
|
| 471 |
+
available_gpus: [6]
|
| 472 |
+
available_gpus: [6]
|
| 473 |
+
available_gpus: [6]
|
| 474 |
+
available_gpus: [6]
|
| 475 |
+
available_gpus: [6]
|
| 476 |
+
available_gpus: [6]
|
| 477 |
+
available_gpus: [6]
|
| 478 |
+
available_gpus: [4, 5, 6]
|
| 479 |
+
The following command is about to run:
|
| 480 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/gaia/inf.log 2>&1 &
|
| 481 |
+
available_gpus: [4, 5, 6]
|
| 482 |
+
The following command is about to run:
|
| 483 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/aime/inf.log 2>&1 &
|
| 484 |
+
available_gpus: [6]
|
| 485 |
+
available_gpus: [4, 6]
|
| 486 |
+
The following command is about to run:
|
| 487 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,6 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name frames --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/frames --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/checkpoint-54 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:16104#LR:1e-5#BASE:QwQ-32B#TOKEN:QwQ-32B#BSZ:1#ACC:8_no_error_data_871_mixed_math/54/frames/inf.log 2>&1 &
|
| 488 |
+
Wish me good luck!
|
deep_search/sft/4-4_no_error_data_871_doc_by_itself_Qwen2.5-32B-Instruct.log
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [78, 68, 55, 41]
|
| 2 |
+
step_list: [78, 68, 55, 41]
|
| 3 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-78) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/78/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/78/eval_old_500/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-68) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/68/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/68/eval_old_500/inf.log 2>&1 &
|
| 9 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-55) to exist...
|
| 10 |
+
The checkpoint exists. Waiting for running...
|
| 11 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500/inf.log 2>&1 &
|
| 12 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-41) to exist...
|
| 13 |
+
The checkpoint exists. Waiting for running...
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/41/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/41/eval_old_500/inf.log 2>&1 &
|
| 15 |
+
All checkpoints exist. Wait for runing...
|
| 16 |
+
available_gpus: [0, 1, 2, 3, 4, 5, 6, 7]
|
| 17 |
+
The following command is about to run:
|
| 18 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=0,1 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/78/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/78/eval_old_500/inf.log 2>&1 &
|
| 19 |
+
The following command is about to run:
|
| 20 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/68/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/68/eval_old_500/inf.log 2>&1 &
|
| 21 |
+
The following command is about to run:
|
| 22 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/55/eval_old_500/inf.log 2>&1 &
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage.py --dataset_name eval_old_500 --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/41/eval_old_500 --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:17483#LR:1e-5#BASE:Qwen2.5-32B-Instruct#TOKEN:Qwen2.5-32B-Instruct#BSZ:2#ACC:4_no_error_data_871/41/eval_old_500/inf.log 2>&1 &
|
| 25 |
+
Wish me good luck!
|
deep_search/sft/5-5_ablation_subquery_1073_random_sample_871_Qwen2.5-7B-Instruct.log
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [78, 68, 55, 41, 27]
|
| 2 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-78) to exist...
|
| 3 |
+
The checkpoint exists. Waiting for running...
|
| 4 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/78/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/78/gaia/inf.log 2>&1 &
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/78/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/78/bamboogle/inf.log 2>&1 &
|
| 6 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-68) to exist...
|
| 7 |
+
The checkpoint exists. Waiting for running...
|
| 8 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/68/gaia/inf.log 2>&1 &
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/68/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/68/bamboogle/inf.log 2>&1 &
|
| 10 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-55) to exist...
|
| 11 |
+
The checkpoint exists. Waiting for running...
|
| 12 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/55/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/55/gaia/inf.log 2>&1 &
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/55/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/55/bamboogle/inf.log 2>&1 &
|
| 14 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-41) to exist...
|
| 15 |
+
The checkpoint exists. Waiting for running...
|
| 16 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/41/gaia/inf.log 2>&1 &
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/41/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/41/bamboogle/inf.log 2>&1 &
|
| 18 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-27) to exist...
|
| 19 |
+
The checkpoint exists. Waiting for running...
|
| 20 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/27/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/27/gaia/inf.log 2>&1 &
|
| 21 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/27/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/27/bamboogle/inf.log 2>&1 &
|
| 22 |
+
All checkpoints exist. Wait for runing...
|
| 23 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 24 |
+
The following command is about to run:
|
| 25 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/78/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/78/gaia/inf.log 2>&1 &
|
| 26 |
+
The following command is about to run:
|
| 27 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/78/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-78 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/78/bamboogle/inf.log 2>&1 &
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/68/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/68/gaia/inf.log 2>&1 &
|
| 30 |
+
available_gpus: [4, 5]
|
| 31 |
+
The following command is about to run:
|
| 32 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/68/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-68 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/68/bamboogle/inf.log 2>&1 &
|
| 33 |
+
available_gpus: []
|
| 34 |
+
available_gpus: []
|
| 35 |
+
available_gpus: []
|
| 36 |
+
available_gpus: []
|
| 37 |
+
available_gpus: []
|
| 38 |
+
available_gpus: []
|
| 39 |
+
available_gpus: []
|
| 40 |
+
available_gpus: []
|
| 41 |
+
available_gpus: []
|
| 42 |
+
available_gpus: []
|
| 43 |
+
available_gpus: []
|
| 44 |
+
available_gpus: []
|
| 45 |
+
available_gpus: []
|
| 46 |
+
available_gpus: []
|
| 47 |
+
available_gpus: []
|
| 48 |
+
available_gpus: []
|
| 49 |
+
available_gpus: []
|
| 50 |
+
available_gpus: []
|
| 51 |
+
available_gpus: []
|
| 52 |
+
available_gpus: []
|
| 53 |
+
available_gpus: []
|
| 54 |
+
available_gpus: []
|
| 55 |
+
available_gpus: []
|
| 56 |
+
available_gpus: [4, 5]
|
| 57 |
+
The following command is about to run:
|
| 58 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/55/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/55/gaia/inf.log 2>&1 &
|
| 59 |
+
available_gpus: []
|
| 60 |
+
available_gpus: []
|
| 61 |
+
available_gpus: []
|
| 62 |
+
available_gpus: []
|
| 63 |
+
available_gpus: []
|
| 64 |
+
available_gpus: []
|
| 65 |
+
available_gpus: []
|
| 66 |
+
available_gpus: []
|
| 67 |
+
available_gpus: []
|
| 68 |
+
available_gpus: []
|
| 69 |
+
available_gpus: []
|
| 70 |
+
available_gpus: []
|
| 71 |
+
available_gpus: []
|
| 72 |
+
available_gpus: []
|
| 73 |
+
available_gpus: []
|
| 74 |
+
available_gpus: []
|
| 75 |
+
available_gpus: []
|
| 76 |
+
available_gpus: []
|
| 77 |
+
available_gpus: []
|
| 78 |
+
available_gpus: []
|
| 79 |
+
available_gpus: []
|
| 80 |
+
available_gpus: []
|
| 81 |
+
available_gpus: []
|
| 82 |
+
available_gpus: []
|
| 83 |
+
available_gpus: []
|
| 84 |
+
available_gpus: [6, 7]
|
| 85 |
+
The following command is about to run:
|
| 86 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/55/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-55 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/55/bamboogle/inf.log 2>&1 &
|
| 87 |
+
available_gpus: []
|
| 88 |
+
available_gpus: [6, 7]
|
| 89 |
+
The following command is about to run:
|
| 90 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/41/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/41/gaia/inf.log 2>&1 &
|
| 91 |
+
available_gpus: [2, 3]
|
| 92 |
+
The following command is about to run:
|
| 93 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name bamboogle --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/41/bamboogle --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-41 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/41/bamboogle/inf.log 2>&1 &
|
| 94 |
+
available_gpus: []
|
| 95 |
+
available_gpus: []
|
| 96 |
+
available_gpus: []
|
| 97 |
+
available_gpus: [4, 5]
|
| 98 |
+
The following command is about to run:
|
| 99 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_pool_vllm_qwq.py --dataset_name gaia --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/27/gaia --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/checkpoint-27 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:15751#LR:1e-5#BASE:Qwen2.5-7B-Instruct#TOKEN:Qwen2.5-7B-Instruct#BSZ:2#ACC:4_ablation_subquery_1073_random_sample_871_ablation/27/gaia/inf.log 2>&1 &
|
| 100 |
+
available_gpus: []
|
| 101 |
+
available_gpus: []
|
| 102 |
+
available_gpus: []
|
| 103 |
+
available_gpus: []
|
| 104 |
+
available_gpus: []
|
| 105 |
+
available_gpus: []
|
| 106 |
+
available_gpus: []
|
| 107 |
+
available_gpus: []
|
| 108 |
+
available_gpus: []
|
| 109 |
+
available_gpus: []
|
| 110 |
+
available_gpus: []
|
| 111 |
+
available_gpus: []
|
| 112 |
+
available_gpus: []
|
| 113 |
+
available_gpus: []
|
| 114 |
+
available_gpus: []
|
deep_search/sft/ds_zero3.json
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bf16": {
|
| 3 |
+
"enabled": "auto"
|
| 4 |
+
},
|
| 5 |
+
"zero_optimization": {
|
| 6 |
+
"stage": 3,
|
| 7 |
+
"overlap_comm": true,
|
| 8 |
+
"contiguous_gradients": true,
|
| 9 |
+
"sub_group_size": 5E8,
|
| 10 |
+
"reduce_bucket_size": "auto",
|
| 11 |
+
"stage3_prefetch_bucket_size": "auto",
|
| 12 |
+
"stage3_param_persistence_threshold": "auto",
|
| 13 |
+
"stage3_max_live_parameters": 5E8,
|
| 14 |
+
"stage3_max_reuse_distance": 5E8,
|
| 15 |
+
"stage3_gather_16bit_weights_on_model_save": true
|
| 16 |
+
},
|
| 17 |
+
"gradient_accumulation_steps": "auto",
|
| 18 |
+
"gradient_clipping": "auto",
|
| 19 |
+
"steps_per_print": 2000,
|
| 20 |
+
"train_batch_size": "auto",
|
| 21 |
+
"train_micro_batch_size_per_gpu": "auto",
|
| 22 |
+
"wall_clock_breakdown": false,
|
| 23 |
+
"flops_profiler": {
|
| 24 |
+
"enabled": false,
|
| 25 |
+
"profile_step": 10,
|
| 26 |
+
"module_depth": -1,
|
| 27 |
+
"top_modules": 3,
|
| 28 |
+
"detailed": true,
|
| 29 |
+
"output_file": "flops_profiler.out"
|
| 30 |
+
}
|
| 31 |
+
}
|
deep_search/sft/gen_data_1.log
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Traceback (most recent call last):
|
| 2 |
+
File "/opt/aps/workdir/sunshuang/deep_search/search_o1/gen_data.py", line 4, in <module>
|
| 3 |
+
from vllm import LLM, SamplingParams
|
| 4 |
+
ModuleNotFoundError: No module named 'vllm'
|
deep_search/sft/math_eval.sh
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
cd /opt/aps/workdir/math/evaluation
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=6,7
|
| 5 |
+
|
| 6 |
+
dataset_name=AIME24
|
| 7 |
+
model_name=DeepSeek-R1-Distill-Qwen-32
|
| 8 |
+
model_path=/capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32
|
| 9 |
+
max_tokens=20000
|
| 10 |
+
|
| 11 |
+
target_path=/opt/aps/workdir/math/evaluation/outputs/${model_name}/${dataset_name}
|
| 12 |
+
log_path=/opt/aps/workdir/math/evaluation/logs/${model_name}/${dataset_name}
|
| 13 |
+
mkdir -p ${target_path}
|
| 14 |
+
mkdir -p ${log_path}
|
| 15 |
+
|
| 16 |
+
nohup /opt/aps/workdir/miniforge3/envs/search_o1/bin/python -u run_eval_32b.py \
|
| 17 |
+
--data_name ${dataset_name} \
|
| 18 |
+
--target_path ${target_path} \
|
| 19 |
+
--model_name_or_path ${model_path} \
|
| 20 |
+
--prompt v4 \
|
| 21 |
+
--max_tokens ${max_tokens} \
|
| 22 |
+
--paralle_size 2 > ${log_path}/inf.log 2>&1 &
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
dataset_name=AMC23
|
| 26 |
+
nohup /opt/aps/workdir/miniforge3/envs/search_o1/bin/python -u run_eval_32b.py \
|
| 27 |
+
--data_name ${dataset_name} \
|
| 28 |
+
--target_path /opt/aps/workdir/math/evaluation/outputs/${model_name}/${dataset_name} \
|
| 29 |
+
--model_name_or_path ${model_path} \
|
| 30 |
+
--prompt v4 \
|
| 31 |
+
--max_tokens ${max_tokens} \
|
| 32 |
+
--paralle_size 2 > /opt/aps/workdir/math/evaluation/logs/${model_name}/${dataset_name}/inf.log 2>&1 &
|
deep_search/sft/mix_1.sh
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# export CUDA_VISIBLE_DEVICES=0,1,2,3
|
| 6 |
+
# --include localhost:0,1,2,3,4,5,6,7 \
|
| 7 |
+
|
| 8 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 9 |
+
# --master_port=9944 \
|
| 10 |
+
# --include localhost:4,5,6,7 \
|
| 11 |
+
# sft.py \
|
| 12 |
+
# --deepspeed ds_zero3.json \
|
| 13 |
+
# --model_name_or_path /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 14 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-1.5B \
|
| 15 |
+
# --do_train \
|
| 16 |
+
# --save_safetensors true \
|
| 17 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/merged_selected_data.json \
|
| 18 |
+
# --lr_scheduler_type cosine \
|
| 19 |
+
# --output_dir output/checkpoint/qwen_32B_test \
|
| 20 |
+
# --overwrite_output_dir \
|
| 21 |
+
# --warmup_ratio 0.03 \
|
| 22 |
+
# --gradient_checkpointing true \
|
| 23 |
+
# --per_device_train_batch_size 2 \
|
| 24 |
+
# --gradient_accumulation_steps 2 \
|
| 25 |
+
# --logging_steps 1 \
|
| 26 |
+
# --learning_rate 2e-5 \
|
| 27 |
+
# --num_train_epochs 2 \
|
| 28 |
+
# --save_steps 400 \
|
| 29 |
+
# --model_max_length 8192 \
|
| 30 |
+
# --save_total_limit 16 \
|
| 31 |
+
# --bf16 || exit 1
|
| 32 |
+
|
| 33 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 34 |
+
# --master_port=9944 \
|
| 35 |
+
# sft.py \
|
| 36 |
+
# --deepspeed ds_zero3.json \
|
| 37 |
+
# --model_name_or_path /capacity/userdata/models/Qwen2.5-32B-Instruct \
|
| 38 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 39 |
+
# --do_train \
|
| 40 |
+
# --save_safetensors true \
|
| 41 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 42 |
+
# --lr_scheduler_type cosine \
|
| 43 |
+
# --output_dir output/checkpoint/qwen_2_5_32b_data_1217 \
|
| 44 |
+
# --overwrite_output_dir \
|
| 45 |
+
# --warmup_ratio 0.03 \
|
| 46 |
+
# --gradient_checkpointing true \
|
| 47 |
+
# --per_device_train_batch_size 1 \
|
| 48 |
+
# --gradient_accumulation_steps 4 \
|
| 49 |
+
# --logging_steps 1 \
|
| 50 |
+
# --learning_rate 2e-5 \
|
| 51 |
+
# --num_train_epochs 1 \
|
| 52 |
+
# --save_steps 400 \
|
| 53 |
+
# --model_max_length 8192 \
|
| 54 |
+
# --save_total_limit 16 \
|
| 55 |
+
# --bf16 || exit 1
|
| 56 |
+
|
| 57 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 58 |
+
# --master_port=9944 \
|
| 59 |
+
# --include localhost:4,5,6,7 \
|
| 60 |
+
# sft.py \
|
| 61 |
+
# --deepspeed ds_zero3.json \
|
| 62 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 63 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 64 |
+
# --do_train \
|
| 65 |
+
# --save_safetensors true \
|
| 66 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 67 |
+
# --lr_scheduler_type cosine \
|
| 68 |
+
# --output_dir output/checkpoint/qwen_7b_inst_data_1217 \
|
| 69 |
+
# --overwrite_output_dir \
|
| 70 |
+
# --warmup_ratio 0.03 \
|
| 71 |
+
# --gradient_checkpointing true \
|
| 72 |
+
# --per_device_train_batch_size 1 \
|
| 73 |
+
# --gradient_accumulation_steps 4 \
|
| 74 |
+
# --logging_steps 1 \
|
| 75 |
+
# --learning_rate 2e-5 \
|
| 76 |
+
# --num_train_epochs 1 \
|
| 77 |
+
# --save_steps 400 \
|
| 78 |
+
# --model_max_length 8192 \
|
| 79 |
+
# --save_total_limit 16 \
|
| 80 |
+
# --bf16 || exit 1
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# qwen 7b 用自己的tokenizer
|
| 84 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 85 |
+
# --master_port=9944 \
|
| 86 |
+
# --include localhost:6,7 \
|
| 87 |
+
# sft.py \
|
| 88 |
+
# --deepspeed ds_zero3.json \
|
| 89 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 90 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 91 |
+
# --do_train \
|
| 92 |
+
# --save_safetensors true \
|
| 93 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 94 |
+
# --lr_scheduler_type cosine \
|
| 95 |
+
# --output_dir output/checkpoint/qwen_7b_original_tokenizer_inst_data_1217 \
|
| 96 |
+
# --overwrite_output_dir \
|
| 97 |
+
# --warmup_ratio 0.03 \
|
| 98 |
+
# --gradient_checkpointing true \
|
| 99 |
+
# --per_device_train_batch_size 1 \
|
| 100 |
+
# --gradient_accumulation_steps 4 \
|
| 101 |
+
# --logging_steps 1 \
|
| 102 |
+
# --learning_rate 2e-5 \
|
| 103 |
+
# --num_train_epochs 1 \
|
| 104 |
+
# --save_steps 400 \
|
| 105 |
+
# --model_max_length 8192 \
|
| 106 |
+
# --save_total_limit 16 \
|
| 107 |
+
# --bf16 || exit 1
|
| 108 |
+
|
| 109 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 110 |
+
# --master_port=9944 \
|
| 111 |
+
# --include localhost:7 \
|
| 112 |
+
# sft_1.py \
|
| 113 |
+
# --deepspeed ds_zero3.json \
|
| 114 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 115 |
+
# --tokenizer_name_or_path /capacity/userdata/models/DeepSeek-R1-Distill-Qwen-32 \
|
| 116 |
+
# --do_train \
|
| 117 |
+
# --save_safetensors true \
|
| 118 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 119 |
+
# --lr_scheduler_type cosine \
|
| 120 |
+
# --output_dir output/checkpoint/qwen_7b_inst_data_1217 \
|
| 121 |
+
# --overwrite_output_dir \
|
| 122 |
+
# --warmup_ratio 0.03 \
|
| 123 |
+
# --gradient_checkpointing true \
|
| 124 |
+
# --per_device_train_batch_size 1 \
|
| 125 |
+
# --gradient_accumulation_steps 4 \
|
| 126 |
+
# --logging_steps 1 \
|
| 127 |
+
# --learning_rate 2e-5 \
|
| 128 |
+
# --num_train_epochs 1 \
|
| 129 |
+
# --save_steps 400 \
|
| 130 |
+
# --model_max_length 8192 \
|
| 131 |
+
# --save_total_limit 16 \
|
| 132 |
+
# --bf16 || exit 1
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
# qwen 7b 用自己的tokenizer
|
| 137 |
+
# /opt/aps/workdir/miniforge3/envs/train/bin/deepspeed \
|
| 138 |
+
# --master_port=9944 \
|
| 139 |
+
# --include localhost:6,7 \
|
| 140 |
+
# sft.py \
|
| 141 |
+
# --deepspeed ds_zero3.json \
|
| 142 |
+
# --model_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 143 |
+
# --tokenizer_name_or_path /opt/aps/workdir/home/.cache/modelscope/hub/models/qwen/Qwen2.5-7B-Instruct \
|
| 144 |
+
# --do_train \
|
| 145 |
+
# --save_safetensors true \
|
| 146 |
+
# --data_path /opt/aps/workdir/input/data/search-o1/hopotqa_1217.json \
|
| 147 |
+
# --lr_scheduler_type cosine \
|
| 148 |
+
# --output_dir output/sft_use_original_tokenizer/qwen_7b_original_tokenizer_inst_data_1217_1 \
|
| 149 |
+
# --overwrite_output_dir \
|
| 150 |
+
# --warmup_ratio 0.03 \
|
| 151 |
+
# --gradient_checkpointing true \
|
| 152 |
+
# --per_device_train_batch_size 1 \
|
| 153 |
+
# --gradient_accumulation_steps 4 \
|
| 154 |
+
# --logging_steps 1 \
|
| 155 |
+
# --learning_rate 2e-5 \
|
| 156 |
+
# --num_train_epochs 1 \
|
| 157 |
+
# --model_max_length 8192 \
|
| 158 |
+
# --save_total_limit 16 \
|
| 159 |
+
# --bf16 || exit 1
|
| 160 |
+
# 定义参数
|
| 161 |
+
lr=1e-5
|
| 162 |
+
base=QwQ-32B
|
| 163 |
+
tokenizer=QwQ-32B
|
| 164 |
+
# train_data=hopotqa_1217.json
|
| 165 |
+
train_data=strict_selected_1526_sft
|
| 166 |
+
bsz=2
|
| 167 |
+
acc=4
|
| 168 |
+
|
| 169 |
+
# 生成随机 JOB-ID
|
| 170 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 171 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 172 |
+
|
| 173 |
+
# 输出路径
|
| 174 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 175 |
+
|
| 176 |
+
output_dir_1=${output_dir}
|
| 177 |
+
model_name_1=${base}
|
| 178 |
+
# 创建输出目录
|
| 179 |
+
mkdir -p "$output_dir"
|
| 180 |
+
|
| 181 |
+
echo ${output_dir}
|
| 182 |
+
|
| 183 |
+
# 执行 deepspeed 命令
|
| 184 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 185 |
+
--master_port=9944 \
|
| 186 |
+
sft_1.py \
|
| 187 |
+
--deepspeed ds_zero3_offload.json \
|
| 188 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 189 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 190 |
+
--do_train \
|
| 191 |
+
--save_safetensors true \
|
| 192 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 193 |
+
--lr_scheduler_type cosine \
|
| 194 |
+
--output_dir "$output_dir" \
|
| 195 |
+
--overwrite_output_dir \
|
| 196 |
+
--warmup_ratio 0.03 \
|
| 197 |
+
--gradient_checkpointing true \
|
| 198 |
+
--per_device_train_batch_size "$bsz" \
|
| 199 |
+
--gradient_accumulation_steps "$acc" \
|
| 200 |
+
--logging_steps 1 \
|
| 201 |
+
--learning_rate "$lr" \
|
| 202 |
+
--num_train_epochs 6 \
|
| 203 |
+
--save_strategy epoch \
|
| 204 |
+
--save_only_model true \
|
| 205 |
+
--model_max_length 30000 \
|
| 206 |
+
--save_total_limit 15 \
|
| 207 |
+
--bf16 || exit 1
|
| 208 |
+
|
| 209 |
+
# 3-15 model_max_length 25000 -> 30000
|
| 210 |
+
#################################################
|
| 211 |
+
lr=1e-5
|
| 212 |
+
base=Qwen2.5-32B-Instruct
|
| 213 |
+
tokenizer=Qwen2.5-32B-Instruct
|
| 214 |
+
# train_data=hopotqa_1217.json
|
| 215 |
+
train_data=strict_selected_1526_sft
|
| 216 |
+
bsz=2
|
| 217 |
+
acc=4
|
| 218 |
+
|
| 219 |
+
# 生成随机 JOB-ID
|
| 220 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 221 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 222 |
+
|
| 223 |
+
# 输出路径
|
| 224 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 225 |
+
|
| 226 |
+
output_dir_2=${output_dir}
|
| 227 |
+
model_name_2=${base}
|
| 228 |
+
# 创建输出目录
|
| 229 |
+
mkdir -p "$output_dir"
|
| 230 |
+
|
| 231 |
+
echo ${output_dir}
|
| 232 |
+
|
| 233 |
+
# 执行 deepspeed 命令
|
| 234 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 235 |
+
--master_port=9944 \
|
| 236 |
+
sft_1.py \
|
| 237 |
+
--deepspeed ds_zero3_offload.json \
|
| 238 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 239 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 240 |
+
--do_train \
|
| 241 |
+
--save_safetensors true \
|
| 242 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 243 |
+
--lr_scheduler_type cosine \
|
| 244 |
+
--output_dir "$output_dir" \
|
| 245 |
+
--overwrite_output_dir \
|
| 246 |
+
--warmup_ratio 0.03 \
|
| 247 |
+
--gradient_checkpointing true \
|
| 248 |
+
--per_device_train_batch_size "$bsz" \
|
| 249 |
+
--gradient_accumulation_steps "$acc" \
|
| 250 |
+
--logging_steps 1 \
|
| 251 |
+
--learning_rate "$lr" \
|
| 252 |
+
--num_train_epochs 6 \
|
| 253 |
+
--save_strategy epoch \
|
| 254 |
+
--save_only_model true \
|
| 255 |
+
--model_max_length 30000 \
|
| 256 |
+
--save_total_limit 15 \
|
| 257 |
+
--bf16 || exit 1
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
########################
|
| 262 |
+
lr=1e-5
|
| 263 |
+
base=Qwen2.5-7B-Instruct
|
| 264 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 265 |
+
# train_data=hopotqa_1217.json
|
| 266 |
+
train_data=strict_selected_1526_sft
|
| 267 |
+
bsz=2
|
| 268 |
+
acc=4
|
| 269 |
+
|
| 270 |
+
# 生成随机 JOB-ID
|
| 271 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 272 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 273 |
+
|
| 274 |
+
# 输出路径
|
| 275 |
+
output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 276 |
+
|
| 277 |
+
output_dir_3=${output_dir}
|
| 278 |
+
model_name_3=${base}
|
| 279 |
+
# 创建输出目录
|
| 280 |
+
mkdir -p "$output_dir"
|
| 281 |
+
|
| 282 |
+
echo ${output_dir}
|
| 283 |
+
|
| 284 |
+
# 执行 deepspeed 命令
|
| 285 |
+
/share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 286 |
+
--master_port=9944 \
|
| 287 |
+
sft_1.py \
|
| 288 |
+
--deepspeed ds_zero3_offload.json \
|
| 289 |
+
--model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 290 |
+
--tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 291 |
+
--do_train \
|
| 292 |
+
--save_safetensors true \
|
| 293 |
+
--data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 294 |
+
--lr_scheduler_type cosine \
|
| 295 |
+
--output_dir "$output_dir" \
|
| 296 |
+
--overwrite_output_dir \
|
| 297 |
+
--warmup_ratio 0.03 \
|
| 298 |
+
--gradient_checkpointing true \
|
| 299 |
+
--per_device_train_batch_size "$bsz" \
|
| 300 |
+
--gradient_accumulation_steps "$acc" \
|
| 301 |
+
--logging_steps 1 \
|
| 302 |
+
--learning_rate "$lr" \
|
| 303 |
+
--num_train_epochs 6 \
|
| 304 |
+
--save_strategy epoch \
|
| 305 |
+
--save_only_model true \
|
| 306 |
+
--model_max_length 30000 \
|
| 307 |
+
--save_total_limit 15 \
|
| 308 |
+
--bf16 || exit 1
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
bash test.sh $output_dir_1 $model_name_1
|
| 312 |
+
bash test.sh $output_dir_2 $model_name_2
|
| 313 |
+
bash test.sh $output_dir_3 $model_name_3
|
| 314 |
+
|
| 315 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 316 |
+
|
| 317 |
+
|
deep_search/sft/mix_math_first.sh
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# 定义参数
|
| 6 |
+
lr=1e-5
|
| 7 |
+
base=Qwen2.5-7B-Instruct
|
| 8 |
+
tokenizer=Qwen2.5-7B-Instruct
|
| 9 |
+
# train_data=hopotqa_1217.json
|
| 10 |
+
train_data=math_qwq_4524_selected_add_prompt_871
|
| 11 |
+
bsz=2
|
| 12 |
+
acc=4
|
| 13 |
+
|
| 14 |
+
# 生成随机 JOB-ID
|
| 15 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 16 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 17 |
+
|
| 18 |
+
# 输出路径
|
| 19 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 20 |
+
|
| 21 |
+
output_dir_1=${output_dir}
|
| 22 |
+
model_name_1=${base}
|
| 23 |
+
dataset_1=${train_data}
|
| 24 |
+
# 创建输出目录
|
| 25 |
+
mkdir -p "$output_dir"
|
| 26 |
+
|
| 27 |
+
echo ${output_dir}
|
| 28 |
+
|
| 29 |
+
# 执行 deepspeed 命令
|
| 30 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 31 |
+
--master_port=9944 \
|
| 32 |
+
sft_2_math_after_search.py \
|
| 33 |
+
--deepspeed ds_zero3_offload.json \
|
| 34 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 35 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 36 |
+
--do_train \
|
| 37 |
+
--save_safetensors true \
|
| 38 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 39 |
+
--lr_scheduler_type cosine \
|
| 40 |
+
--output_dir "$output_dir" \
|
| 41 |
+
--overwrite_output_dir \
|
| 42 |
+
--warmup_ratio 0.03 \
|
| 43 |
+
--gradient_checkpointing true \
|
| 44 |
+
--per_device_train_batch_size "$bsz" \
|
| 45 |
+
--gradient_accumulation_steps "$acc" \
|
| 46 |
+
--logging_steps 1 \
|
| 47 |
+
--learning_rate "$lr" \
|
| 48 |
+
--num_train_epochs 6 \
|
| 49 |
+
--save_strategy epoch \
|
| 50 |
+
--save_only_model true \
|
| 51 |
+
--model_max_length 30000 \
|
| 52 |
+
--save_total_limit 10 \
|
| 53 |
+
--bf16 || exit 1
|
| 54 |
+
|
| 55 |
+
bash test_two_model.sh $output_dir_1 $model_name_1 $dataset_1
|
| 56 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 57 |
+
# bash test.sh $output_dir_3 $model_name_3
|
| 58 |
+
|
| 59 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 60 |
+
|
| 61 |
+
|
deep_search/sft/mix_math_multi_node.sh
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
# 定义参数
|
| 6 |
+
lr=1e-5
|
| 7 |
+
base=QwQ-32B
|
| 8 |
+
tokenizer=QwQ-32B
|
| 9 |
+
# train_data=hopotqa_1217.json
|
| 10 |
+
train_data=no_error_data_871
|
| 11 |
+
bsz=1
|
| 12 |
+
acc=8
|
| 13 |
+
|
| 14 |
+
# 生成随机 JOB-ID
|
| 15 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 16 |
+
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_mixed_math"
|
| 17 |
+
|
| 18 |
+
# 输出路径
|
| 19 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 20 |
+
|
| 21 |
+
output_dir_1=${output_dir}
|
| 22 |
+
model_name_1=${base}
|
| 23 |
+
dataset_1=${train_data}
|
| 24 |
+
# 创建输出目录
|
| 25 |
+
mkdir -p "$output_dir"
|
| 26 |
+
|
| 27 |
+
echo ${output_dir}
|
| 28 |
+
|
| 29 |
+
# 执行 deepspeed 命令
|
| 30 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 31 |
+
--hostfile=hostfile \
|
| 32 |
+
--no_ssh \
|
| 33 |
+
--node_rank=0 \
|
| 34 |
+
--master_addr=172.19.164.116 \
|
| 35 |
+
--master_port=9944 \
|
| 36 |
+
sft_2_math.py \
|
| 37 |
+
--deepspeed ds_zero3_offload.json \
|
| 38 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 39 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 40 |
+
--do_train \
|
| 41 |
+
--save_safetensors true \
|
| 42 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 43 |
+
--lr_scheduler_type cosine \
|
| 44 |
+
--output_dir "$output_dir" \
|
| 45 |
+
--overwrite_output_dir \
|
| 46 |
+
--warmup_ratio 0.03 \
|
| 47 |
+
--gradient_checkpointing true \
|
| 48 |
+
--per_device_train_batch_size "$bsz" \
|
| 49 |
+
--gradient_accumulation_steps "$acc" \
|
| 50 |
+
--logging_steps 1 \
|
| 51 |
+
--learning_rate "$lr" \
|
| 52 |
+
--num_train_epochs 6 \
|
| 53 |
+
--save_strategy epoch \
|
| 54 |
+
--save_only_model true \
|
| 55 |
+
--model_max_length 30000 \
|
| 56 |
+
--save_total_limit 6 \
|
| 57 |
+
--bf16 || exit 1
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
bash test_two_model_qwq.sh $output_dir_1 $model_name_1 $dataset_1
|
| 61 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 62 |
+
# bash test.sh $output_dir_3 $model_name_3
|
| 63 |
+
|
| 64 |
+
# bash test.sh $output_dir_2 $model_name_2
|
| 65 |
+
|
| 66 |
+
|
deep_search/sft/mix_re.sh
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
export OMP_NUM_THREADS=20
|
| 4 |
+
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 5 |
+
|
| 6 |
+
###########################
|
| 7 |
+
# 定义参数
|
| 8 |
+
lr=1e-5
|
| 9 |
+
base=QwQ-32B
|
| 10 |
+
tokenizer=QwQ-32B
|
| 11 |
+
# train_data=hopotqa_1217.json
|
| 12 |
+
train_data=merged_syn_long_359_sft_1533
|
| 13 |
+
bsz=2
|
| 14 |
+
acc=4
|
| 15 |
+
|
| 16 |
+
# 生成随机 JOB-ID
|
| 17 |
+
JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 18 |
+
save_path="JOB:${2323}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 19 |
+
|
| 20 |
+
# 输出路径
|
| 21 |
+
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
|
| 22 |
+
|
| 23 |
+
output_dir_1=${output_dir}
|
| 24 |
+
model_name_1=${base}
|
| 25 |
+
dataset_1=${train_data}
|
| 26 |
+
# 创建输出目录
|
| 27 |
+
mkdir -p "$output_dir"
|
| 28 |
+
|
| 29 |
+
echo ${output_dir}
|
| 30 |
+
|
| 31 |
+
# 执行 deepspeed 命令
|
| 32 |
+
/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed \
|
| 33 |
+
--master_port=9944 \
|
| 34 |
+
sft_1.py \
|
| 35 |
+
--deepspeed ds_zero3_offload.json \
|
| 36 |
+
--model_name_or_path "/capacity/userdata/models/${base}" \
|
| 37 |
+
--tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}" \
|
| 38 |
+
--do_train \
|
| 39 |
+
--save_safetensors true \
|
| 40 |
+
--data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 41 |
+
--lr_scheduler_type cosine \
|
| 42 |
+
--output_dir "$output_dir" \
|
| 43 |
+
--overwrite_output_dir \
|
| 44 |
+
--warmup_ratio 0.03 \
|
| 45 |
+
--gradient_checkpointing true \
|
| 46 |
+
--per_device_train_batch_size "$bsz" \
|
| 47 |
+
--gradient_accumulation_steps "$acc" \
|
| 48 |
+
--logging_steps 1 \
|
| 49 |
+
--learning_rate "$lr" \
|
| 50 |
+
--num_train_epochs 10000 \
|
| 51 |
+
--save_strategy epoch \
|
| 52 |
+
--save_only_model true \
|
| 53 |
+
--model_max_length 30000 \
|
| 54 |
+
--save_total_limit 1 \
|
| 55 |
+
--bf16 || exit 1
|
| 56 |
+
|
| 57 |
+
# 3-15 model_max_length 25000 -> 30000
|
| 58 |
+
#################################################
|
| 59 |
+
# lr=1e-5
|
| 60 |
+
# base=QwQ-32B
|
| 61 |
+
# tokenizer=QwQ-32B
|
| 62 |
+
# # train_data=hopotqa_1217.json
|
| 63 |
+
# train_data=merged_syn_short_398_sft_1572
|
| 64 |
+
# bsz=2
|
| 65 |
+
# acc=4
|
| 66 |
+
|
| 67 |
+
# # 生成随机 JOB-ID
|
| 68 |
+
# JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 69 |
+
# save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 70 |
+
|
| 71 |
+
# # 输出路径
|
| 72 |
+
# output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 73 |
+
|
| 74 |
+
# output_dir_2=${output_dir}
|
| 75 |
+
# model_name_2=${base}
|
| 76 |
+
# dataset_2=${train_data}
|
| 77 |
+
# # 创建输出目录
|
| 78 |
+
# mkdir -p "$output_dir"
|
| 79 |
+
|
| 80 |
+
# echo ${output_dir}
|
| 81 |
+
|
| 82 |
+
# # 执行 deepspeed 命令
|
| 83 |
+
# /share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 84 |
+
# --master_port=9944 \
|
| 85 |
+
# sft_1.py \
|
| 86 |
+
# --deepspeed ds_zero3_offload.json \
|
| 87 |
+
# --model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 88 |
+
# --tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 89 |
+
# --do_train \
|
| 90 |
+
# --save_safetensors true \
|
| 91 |
+
# --data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 92 |
+
# --lr_scheduler_type cosine \
|
| 93 |
+
# --output_dir "$output_dir" \
|
| 94 |
+
# --overwrite_output_dir \
|
| 95 |
+
# --warmup_ratio 0.03 \
|
| 96 |
+
# --gradient_checkpointing true \
|
| 97 |
+
# --per_device_train_batch_size "$bsz" \
|
| 98 |
+
# --gradient_accumulation_steps "$acc" \
|
| 99 |
+
# --logging_steps 1 \
|
| 100 |
+
# --learning_rate "$lr" \
|
| 101 |
+
# --num_train_epochs 6 \
|
| 102 |
+
# --save_strategy epoch \
|
| 103 |
+
# --save_only_model true \
|
| 104 |
+
# --model_max_length 30000 \
|
| 105 |
+
# --save_total_limit 15 \
|
| 106 |
+
# --bf16 || exit 1
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# ###################################
|
| 111 |
+
# lr=1e-5
|
| 112 |
+
# base=QwQ-32B
|
| 113 |
+
# tokenizer=QwQ-32B
|
| 114 |
+
# # train_data=hopotqa_1217.json
|
| 115 |
+
# train_data=strict_selected_1526_sft_format_ans
|
| 116 |
+
# bsz=2
|
| 117 |
+
# acc=4
|
| 118 |
+
|
| 119 |
+
# # 生成随机 JOB-ID
|
| 120 |
+
# JOB_ID=$(( RANDOM % 100000 )) # 生成一个 0 到 99999 的随机数
|
| 121 |
+
# save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}"
|
| 122 |
+
|
| 123 |
+
# # 输出路径
|
| 124 |
+
# output_dir="/share/project/sunshuang/deep_search/output/${save_path}"
|
| 125 |
+
|
| 126 |
+
# output_dir_3=${output_dir}
|
| 127 |
+
# model_name_3=${base}
|
| 128 |
+
# dataset_3=${train_data}
|
| 129 |
+
# # 创建输出目录
|
| 130 |
+
# mkdir -p "$output_dir"
|
| 131 |
+
|
| 132 |
+
# echo ${output_dir}
|
| 133 |
+
|
| 134 |
+
# # 执行 deepspeed 命令
|
| 135 |
+
# /share/project/miniconda/envs/ss_train/bin/deepspeed \
|
| 136 |
+
# --master_port=9944 \
|
| 137 |
+
# sft_1.py \
|
| 138 |
+
# --deepspeed ds_zero3_offload.json \
|
| 139 |
+
# --model_name_or_path "/share/project/zhipengchen/model/${base}" \
|
| 140 |
+
# --tokenizer_name_or_path "/share/project/zhipengchen/model/${tokenizer}" \
|
| 141 |
+
# --do_train \
|
| 142 |
+
# --save_safetensors true \
|
| 143 |
+
# --data_path "/share/project/sunshuang/deep_search/search_o1/sft_data/${train_data}.json" \
|
| 144 |
+
# --lr_scheduler_type cosine \
|
| 145 |
+
# --output_dir "$output_dir" \
|
| 146 |
+
# --overwrite_output_dir \
|
| 147 |
+
# --warmup_ratio 0.03 \
|
| 148 |
+
# --gradient_checkpointing true \
|
| 149 |
+
# --per_device_train_batch_size "$bsz" \
|
| 150 |
+
# --gradient_accumulation_steps "$acc" \
|
| 151 |
+
# --logging_steps 1 \
|
| 152 |
+
# --learning_rate "$lr" \
|
| 153 |
+
# --num_train_epochs 6 \
|
| 154 |
+
# --save_strategy epoch \
|
| 155 |
+
# --save_only_model true \
|
| 156 |
+
# --model_max_length 30000 \
|
| 157 |
+
# --save_total_limit 15 \
|
| 158 |
+
# --bf16 || exit 1
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
###################################
|
| 163 |
+
# 测试
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
# bash test.sh $output_dir_1 $model_name_1 $dataset_1
|
| 167 |
+
|
| 168 |
+
# sleep 7200
|
| 169 |
+
# bash test.sh $output_dir_2 $model_name_2 $dataset_2
|
| 170 |
+
|
| 171 |
+
# sleep 600
|
| 172 |
+
# bash test.sh $output_dir_3 $model_name_3 $dataset_3
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
|
deep_search/sft/nohup.out
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step_list: [6, 5, 4, 3]
|
| 2 |
+
step_list: [6, 5, 4, 3]
|
| 3 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-6) to exist...
|
| 4 |
+
The checkpoint exists. Waiting for running...
|
| 5 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/6/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-6 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/6/eval/inf.log 2>&1 &
|
| 6 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/6/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-6 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/6/aime/inf.log 2>&1 &
|
| 7 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-5) to exist...
|
| 8 |
+
The checkpoint exists. Waiting for running...
|
| 9 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/5/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-5 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/5/eval/inf.log 2>&1 &
|
| 10 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/5/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-5 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/5/aime/inf.log 2>&1 &
|
| 11 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-4) to exist...
|
| 12 |
+
The checkpoint exists. Waiting for running...
|
| 13 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/4/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-4 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/4/eval/inf.log 2>&1 &
|
| 14 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/4/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-4 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/4/aime/inf.log 2>&1 &
|
| 15 |
+
Waiting for checkpoint (/capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-3) to exist...
|
| 16 |
+
The checkpoint exists. Waiting for running...
|
| 17 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/3/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-3 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/3/eval/inf.log 2>&1 &
|
| 18 |
+
command: export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES={device} nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/3/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-3 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/3/aime/inf.log 2>&1 &
|
| 19 |
+
All checkpoints exist. Wait for runing...
|
| 20 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 21 |
+
The following command is about to run:
|
| 22 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/6/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-6 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/6/eval/inf.log 2>&1 &
|
| 23 |
+
The following command is about to run:
|
| 24 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/6/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-6 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/6/aime/inf.log 2>&1 &
|
| 25 |
+
The following command is about to run:
|
| 26 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/5/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-5 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/5/eval/inf.log 2>&1 &
|
| 27 |
+
available_gpus: [2, 3, 4, 5, 6, 7]
|
| 28 |
+
The following command is about to run:
|
| 29 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=2,3 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/5/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-5 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/5/aime/inf.log 2>&1 &
|
| 30 |
+
The following command is about to run:
|
| 31 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=4,5 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name eval --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/4/eval --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-4 --bing_subscription_key "cb0d28279a826d7e5cf22d71f683c77ffd4ba27d" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/4/eval/inf.log 2>&1 &
|
| 32 |
+
The following command is about to run:
|
| 33 |
+
export https_proxy=http://127.0.0.1:7890 && export http_proxy=http://127.0.0.1:7890 && export all_proxy=socks5://127.0.0.1:7891 && cd /opt/aps/workdir/sunshuang/deep_search/search_o1 && CUDA_VISIBLE_DEVICES=6,7 nohup /opt/aps/workdir/search_o1/bin/python3 -u scripts/search_o1_sum_all_webpage_two_model_vllm.py --dataset_name aime --cache_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/cache_eval_sum_all_webpage_1w --output_dir_base /opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/4/aime --split test --max_search_limit 10 --max_turn 10 --top_k 10 --max_doc_len 5000 --model_path /capacity/userdata/ss/sft_search/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/checkpoint-4 --bing_subscription_key "REDACTED-API-KEY" --bing_endpoint "https://google.serper.dev/search" > /opt/aps/workdir/sunshuang/deep_search/search_o1/logs/log_eval/JOB:10457#LR:1e-5#BASE:qwen7b_sft_871_checkpoint-78#TOKEN:qwen7b_sft_871_checkpoint-78#BSZ:2#ACC:4_math_qwq_4524_selected_add_prompt_871/4/aime/inf.log 2>&1 &
|
| 34 |
+
Traceback (most recent call last):
|
| 35 |
+
File "/opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py", line 178, in <module>
|
| 36 |
+
clear()
|
| 37 |
+
File "/opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py", line 170, in clear
|
| 38 |
+
commands = run(commands)
|
| 39 |
+
File "/opt/aps/workdir/sunshuang/deep_search/sft/wait_eval_use_one_model_for_ckpt_two_model.py", line 41, in run
|
| 40 |
+
sleep(SLEEP_TIME)
|
| 41 |
+
KeyboardInterrupt
|
deep_search/sft/sft_1.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import copy
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import Optional, Dict, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch.utils.data import random_split
|
| 11 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 12 |
+
import transformers
|
| 13 |
+
from torch.utils.data import Dataset
|
| 14 |
+
from transformers import Trainer
|
| 15 |
+
import random
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
from transformers import AutoModelForCausalLM, TrainingArguments
|
| 18 |
+
from datasets import load_dataset
|
| 19 |
+
from transformers import DataCollatorForSeq2Seq
|
| 20 |
+
import shutil
|
| 21 |
+
|
| 22 |
+
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import matplotlib.pyplot as plt
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
class ModelArguments:
    """CLI arguments selecting the model and (optionally separate) tokenizer."""

    # HF hub id or local path of the base model checkpoint.
    model_name_or_path: Optional[str] = "facebook/opt-125m"
    # Tokenizer path; falls back to model_name_or_path when left as None.
    tokenizer_name_or_path: Optional[str] = None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
class DataArguments:
    """CLI arguments describing the training data."""

    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    prompt_type: Optional[str] = "instruction"
    # NOTE: misspelling of "dialog" kept on purpose -- HfArgumentParser derives
    # the CLI flag name from this attribute, so renaming would break callers.
    dailog_augmentation: Optional[bool] = False
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with a tokenizer length cap and optimizer default."""

    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    # Passed to AutoTokenizer.from_pretrained in train().
    model_max_length: int = field(
        default=512,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Label id skipped by the loss; matches the -100 label_pad_token_id used for the collator below.
IGNORE_INDEX = -100
# NOTE(review): currently unused -- the length filter referencing it is commented
# out in get_dataset, which hard-codes 10000 instead.
MAX_LENGTH = 2000
|
| 59 |
+
|
| 60 |
+
def process(sample, tokenizer):
    """Tokenize one multi-turn search trace into input_ids / attention_mask / labels.

    The chat-templated prompt (``sample["input"]``) and every ``doc_gen`` segment
    are masked with IGNORE_INDEX; ``gen`` segments are supervised. Segments under
    any other key, or with a None value, are skipped entirely. An EOS token is
    appended to both sequences and is supervised.
    """
    prompt_text = tokenizer.apply_chat_template(
        [{'role': 'user', 'content': sample["input"]}],
        tokenize=False, add_generation_prompt=True,
    )
    input_ids = tokenizer(prompt_text, add_special_tokens=False)["input_ids"]
    labels = [IGNORE_INDEX] * len(input_ids)

    for turn in sample["output"]:
        for key, text in turn.items():
            if text is None:
                continue
            piece = tokenizer(text, add_special_tokens=False)["input_ids"]
            if key == "gen":
                # Model-generated text: contributes to the loss.
                input_ids += piece
                labels += piece
            elif key == "doc_gen":
                # Retrieved-document text: present in context but masked.
                input_ids += piece
                labels += [IGNORE_INDEX] * len(piece)

    input_ids.append(tokenizer.eos_token_id)
    labels.append(tokenizer.eos_token_id)

    return {
        "input_ids": input_ids,
        "attention_mask": [1] * len(input_ids),
        "labels": labels,
    }
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def process_math(sample, tokenizer):
    """Tokenize a single-turn math sample into input_ids / attention_mask / labels.

    The chat-templated prompt (``sample["prompt"]``) is masked with IGNORE_INDEX;
    the full ``sample["output"]`` completion plus a trailing EOS are supervised.
    """
    prompt_text = tokenizer.apply_chat_template(
        [{'role': 'user', 'content': sample["prompt"]}],
        tokenize=False, add_generation_prompt=True,
    )
    input_ids = tokenizer(prompt_text, add_special_tokens=False)["input_ids"]
    labels = [IGNORE_INDEX] * len(input_ids)

    completion = tokenizer(sample["output"], add_special_tokens=False)["input_ids"]
    input_ids += completion
    labels += completion

    input_ids.append(tokenizer.eos_token_id)
    labels.append(tokenizer.eos_token_id)

    return {
        "input_ids": input_ids,
        "attention_mask": [1] * len(input_ids),
        "labels": labels,
    }
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def print_function(example, tokenizer):
    """Pretty-print one tokenized example so the prompt/label masking can be eyeballed."""
    print(f"input_ids:\n{example['input_ids']}")
    print(f"inputs:\n{tokenizer.decode(example['input_ids'], skip_special_tokens=False)}")
    print(f"label_ids:\n{example['labels']}")
    # Decode only the supervised tokens (everything not masked with IGNORE_INDEX).
    supervised = [tok for tok in example["labels"] if tok != IGNORE_INDEX]
    print(f"labels:\n{tokenizer.decode(supervised, skip_special_tokens=False)}")
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def get_dataset(file_path, tokenizer, math_dataset=False):
    """Load a JSON/JSONL dataset, tokenize it, and drop over-long examples.

    Args:
        file_path: path readable by ``datasets.load_dataset('json', ...)``.
        tokenizer: HF tokenizer forwarded to ``process`` / ``process_math``.
        math_dataset: if True, use the single-turn ``process_math`` mapper.

    Returns:
        A list of dicts (input_ids / attention_mask / labels), each at most
        10000 tokens long.
    """
    dataset = load_dataset('json', data_files=file_path)
    train_dataset = dataset["train"]
    # FIX: removed dead locals (file_name / dataset_name) left over from a
    # commented-out on-disk cache experiment.

    mapper = process_math if math_dataset else process
    tokenized_dataset = train_dataset.map(
        mapper, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, load_from_cache_file=False
    )
    print_function(next(iter(tokenized_dataset)), tokenizer)
    print(f"len of dataset before filter: {len(tokenized_dataset)}")

    # Drop over-long sequences to avoid OOM during training.
    filtered_dataset = [item for item in tokenized_dataset if len(item["input_ids"]) <= 10000]
    print(f"len of dataset after filter: {len(filtered_dataset)}")
    return filtered_dataset
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def train():
    """Parse CLI args, build the model/tokenizer/dataset, and run supervised fine-tuning."""
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments)
    )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    print("==========Model Args=========")
    print(model_args)
    print("==========Data Args=========")
    print(data_args)
    print("==========Training Args=========")
    print(training_args)

    # use_cache and gradient_checkpointing cannot both be enabled.
    # BUG FIX: the original assigned use_cache only inside
    # `if training_args.gradient_checkpointing:`, so from_pretrained raised
    # NameError whenever gradient checkpointing was disabled.
    use_cache = not training_args.gradient_checkpointing

    model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        _attn_implementation="flash_attention_2",
        use_cache=use_cache,
    ).float()

    if model_args.tokenizer_name_or_path is None:
        model_args.tokenizer_name_or_path = model_args.model_name_or_path
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.tokenizer_name_or_path, model_max_length=training_args.model_max_length
    )
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    dataset = get_dataset(data_args.data_path, tokenizer)
    print(f"final dataset: {len(dataset)}")  # FIX: "fianl" typo

    # Pads inputs dynamically per batch; -100 keeps padded labels out of the loss.
    data_collator = DataCollatorForSeq2Seq(
        tokenizer=tokenizer,
        label_pad_token_id=-100,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        train_dataset=dataset,
    )
    trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
    trainer.save_model(training_args.output_dir)
    trainer.save_state()
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
if __name__ == "__main__":
    # Fix the RNG so runs are reproducible.
    torch.manual_seed(42)
    train()
|
deep_search/sft/sft_2_math.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import copy
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import Optional, Dict, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch.utils.data import random_split
|
| 11 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 12 |
+
import transformers
|
| 13 |
+
from torch.utils.data import Dataset
|
| 14 |
+
from transformers import Trainer
|
| 15 |
+
import random
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
from transformers import AutoModelForCausalLM, TrainingArguments
|
| 18 |
+
from datasets import load_dataset
|
| 19 |
+
from transformers import DataCollatorForSeq2Seq
|
| 20 |
+
import shutil
|
| 21 |
+
|
| 22 |
+
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import matplotlib.pyplot as plt
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
class ModelArguments:
    """CLI arguments selecting the model and (optionally separate) tokenizer."""

    # HF hub id or local path of the base model checkpoint.
    model_name_or_path: Optional[str] = "facebook/opt-125m"
    # Tokenizer path; falls back to model_name_or_path when left as None.
    tokenizer_name_or_path: Optional[str] = None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
class DataArguments:
    """CLI arguments describing the training data."""

    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    prompt_type: Optional[str] = "instruction"
    # NOTE: misspelling of "dialog" kept on purpose -- HfArgumentParser derives
    # the CLI flag name from this attribute, so renaming would break callers.
    dailog_augmentation: Optional[bool] = False
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with a tokenizer length cap and optimizer default."""

    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    # Passed to AutoTokenizer.from_pretrained in train().
    model_max_length: int = field(
        default=512,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Label id skipped by the loss; matches the -100 label_pad_token_id used for the collator below.
IGNORE_INDEX = -100
# NOTE(review): currently unused -- the length filter referencing it is commented
# out, and this variant of get_dataset applies no length filter at all.
MAX_LENGTH = 2000
|
| 59 |
+
|
| 60 |
+
def process(sample, tokenizer):
    """Tokenize one multi-turn search trace into input_ids / attention_mask / labels.

    The chat-templated prompt (``sample["input"]``) and every ``doc_gen`` segment
    are masked with IGNORE_INDEX; ``gen`` segments are supervised. Segments under
    any other key, or with a None value, are skipped entirely. An EOS token is
    appended to both sequences and is supervised.
    """
    prompt_text = tokenizer.apply_chat_template(
        [{'role': 'user', 'content': sample["input"]}],
        tokenize=False, add_generation_prompt=True,
    )
    input_ids = tokenizer(prompt_text, add_special_tokens=False)["input_ids"]
    labels = [IGNORE_INDEX] * len(input_ids)

    for turn in sample["output"]:
        for key, text in turn.items():
            if text is None:
                continue
            piece = tokenizer(text, add_special_tokens=False)["input_ids"]
            if key == "gen":
                # Model-generated text: contributes to the loss.
                input_ids += piece
                labels += piece
            elif key == "doc_gen":
                # Retrieved-document text: present in context but masked.
                input_ids += piece
                labels += [IGNORE_INDEX] * len(piece)

    input_ids.append(tokenizer.eos_token_id)
    labels.append(tokenizer.eos_token_id)

    return {
        "input_ids": input_ids,
        "attention_mask": [1] * len(input_ids),
        "labels": labels,
    }
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def process_math(sample, tokenizer):
    """Tokenize a single-turn math sample into input_ids / attention_mask / labels.

    The chat-templated prompt (``sample["prompt"]``) is masked with IGNORE_INDEX;
    the full ``sample["output"]`` completion plus a trailing EOS are supervised.
    """
    prompt_text = tokenizer.apply_chat_template(
        [{'role': 'user', 'content': sample["prompt"]}],
        tokenize=False, add_generation_prompt=True,
    )
    input_ids = tokenizer(prompt_text, add_special_tokens=False)["input_ids"]
    labels = [IGNORE_INDEX] * len(input_ids)

    completion = tokenizer(sample["output"], add_special_tokens=False)["input_ids"]
    input_ids += completion
    labels += completion

    input_ids.append(tokenizer.eos_token_id)
    labels.append(tokenizer.eos_token_id)

    return {
        "input_ids": input_ids,
        "attention_mask": [1] * len(input_ids),
        "labels": labels,
    }
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def print_function(example, tokenizer):
    """Pretty-print one tokenized example so the prompt/label masking can be eyeballed."""
    print(f"input_ids:\n{example['input_ids']}")
    print(f"inputs:\n{tokenizer.decode(example['input_ids'], skip_special_tokens=False)}")
    print(f"label_ids:\n{example['labels']}")
    # Decode only the supervised tokens (everything not masked with IGNORE_INDEX).
    supervised = [tok for tok in example["labels"] if tok != IGNORE_INDEX]
    print(f"labels:\n{tokenizer.decode(supervised, skip_special_tokens=False)}")
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def get_dataset(file_path, tokenizer, math_dataset=False):
    """Load and tokenize a JSON/JSONL dataset; this variant keeps every example.

    NOTE(review): unlike the sibling sft_1.py, the <=10000-token length filter
    is disabled here -- presumably deliberate; confirm before re-enabling.

    Args:
        file_path: path readable by ``datasets.load_dataset('json', ...)``.
        tokenizer: HF tokenizer forwarded to ``process`` / ``process_math``.
        math_dataset: if True, use the single-turn ``process_math`` mapper.

    Returns:
        A plain list of dicts (input_ids / attention_mask / labels).
    """
    dataset = load_dataset('json', data_files=file_path)
    train_dataset = dataset["train"]
    # FIX: removed dead locals (file_name / dataset_name) left over from a
    # commented-out on-disk cache experiment.

    mapper = process_math if math_dataset else process
    tokenized_dataset = train_dataset.map(
        mapper, fn_kwargs={'tokenizer': tokenizer}, num_proc=1, load_from_cache_file=False
    )
    print_function(next(iter(tokenized_dataset)), tokenizer)
    print(f"len of dataset before filter: {len(tokenized_dataset)}")

    # Materialize into a plain list so Trainer can index it repeatedly.
    # FIX: removed per-item debug prints (`type of item: ...`) that emitted
    # one log line per training example.
    filtered_dataset = list(tokenized_dataset)
    print(f"len of dataset after filter: {len(filtered_dataset)}")
    return filtered_dataset
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def train():
    """Parse CLI args and fine-tune on the QA data mixed with a fixed math dataset."""
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments)
    )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    print("==========Model Args=========")
    print(model_args)
    print("==========Data Args=========")
    print(data_args)
    print("==========Training Args=========")
    print(training_args)

    # use_cache and gradient_checkpointing cannot both be enabled.
    # BUG FIX: the original assigned use_cache only inside
    # `if training_args.gradient_checkpointing:`, so from_pretrained raised
    # NameError whenever gradient checkpointing was disabled.
    use_cache = not training_args.gradient_checkpointing

    model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        _attn_implementation="flash_attention_2",
        use_cache=use_cache,
    ).float()

    if model_args.tokenizer_name_or_path is None:
        model_args.tokenizer_name_or_path = model_args.model_name_or_path
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.tokenizer_name_or_path, model_max_length=training_args.model_max_length
    )
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    dataset_qa = get_dataset(data_args.data_path, tokenizer)
    print(f"qa dataset: {len(dataset_qa)}")
    # TODO(review): hard-coded absolute path -- should be a CLI argument.
    dataset_math = get_dataset(
        "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/math_qwq_4524_selected_add_prompt_871.json",
        tokenizer,
        True,
    )
    print(f"math dataset: {len(dataset_math)}")
    dataset = dataset_qa + dataset_math
    print(f"final dataset: {len(dataset)}")  # FIX: "fianl" typo

    # Pads inputs dynamically per batch; -100 keeps padded labels out of the loss.
    data_collator = DataCollatorForSeq2Seq(
        tokenizer=tokenizer,
        label_pad_token_id=-100,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        train_dataset=dataset,
    )
    trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
    trainer.save_model(training_args.output_dir)
    trainer.save_state()
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
if __name__ == "__main__":
    # Fix the RNG so runs are reproducible.
    torch.manual_seed(42)
    train()
|
deep_search/sft/sft_2_math_sht_new_prompt.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import copy
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import Optional, Dict, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch.utils.data import random_split
|
| 11 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 12 |
+
import transformers
|
| 13 |
+
from torch.utils.data import Dataset
|
| 14 |
+
from transformers import Trainer
|
| 15 |
+
import random
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
from transformers import AutoModelForCausalLM, TrainingArguments
|
| 18 |
+
from datasets import load_dataset
|
| 19 |
+
from transformers import DataCollatorForSeq2Seq
|
| 20 |
+
import shutil
|
| 21 |
+
|
| 22 |
+
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import matplotlib.pyplot as plt
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
class ModelArguments:
    """CLI arguments selecting the model and (optionally separate) tokenizer."""

    # HF hub id or local path of the base model checkpoint.
    model_name_or_path: Optional[str] = "facebook/opt-125m"
    # Tokenizer path; falls back to model_name_or_path when left as None.
    tokenizer_name_or_path: Optional[str] = None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
class DataArguments:
    """CLI arguments describing the training data."""

    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    prompt_type: Optional[str] = "instruction"
    # NOTE: misspelling of "dialog" kept on purpose -- HfArgumentParser derives
    # the CLI flag name from this attribute, so renaming would break callers.
    dailog_augmentation: Optional[bool] = False
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with a tokenizer length cap and optimizer default."""

    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    # Passed to AutoTokenizer.from_pretrained in train().
    model_max_length: int = field(
        default=512,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Label id skipped by the loss; matches the -100 label_pad_token_id used for the collator below.
IGNORE_INDEX = -100
# NOTE(review): currently unused -- the length filter referencing it is commented out.
MAX_LENGTH = 2000
|
| 59 |
+
|
| 60 |
+
def process(sample, tokenizer):
    """Tokenize one multi-turn search trace into input_ids / attention_mask / labels.

    The chat-templated prompt (``sample["input"]``) and every ``doc_gen`` segment
    are masked with IGNORE_INDEX; ``gen`` segments are supervised. Segments under
    any other key, or with a None value, are skipped entirely. An EOS token is
    appended to both sequences and is supervised.
    """
    prompt_text = tokenizer.apply_chat_template(
        [{'role': 'user', 'content': sample["input"]}],
        tokenize=False, add_generation_prompt=True,
    )
    input_ids = tokenizer(prompt_text, add_special_tokens=False)["input_ids"]
    labels = [IGNORE_INDEX] * len(input_ids)

    for turn in sample["output"]:
        for key, text in turn.items():
            if text is None:
                continue
            piece = tokenizer(text, add_special_tokens=False)["input_ids"]
            if key == "gen":
                # Model-generated text: contributes to the loss.
                input_ids += piece
                labels += piece
            elif key == "doc_gen":
                # Retrieved-document text: present in context but masked.
                input_ids += piece
                labels += [IGNORE_INDEX] * len(piece)

    input_ids.append(tokenizer.eos_token_id)
    labels.append(tokenizer.eos_token_id)

    return {
        "input_ids": input_ids,
        "attention_mask": [1] * len(input_ids),
        "labels": labels,
    }
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def process_math(sample, tokenizer):
    """Tokenize one math SFT sample into trainer-ready features.

    Builds inputs with format `<bos> X Y <eos>` and labels with format
    `<ignore> ... <ignore> Y <eos>`: the chat-templated prompt is fully
    masked with IGNORE_INDEX so only the answer tokens (plus the trailing
    EOS) contribute to the loss.

    Args:
        sample: dict with string fields "prompt" and "output".
        tokenizer: HF tokenizer exposing `apply_chat_template`, callable
            tokenization, and `eos_token_id`.

    Returns:
        dict with "input_ids", "attention_mask" and "labels" (python lists
        of equal length).
    """
    # Wrap the raw prompt in the model's chat template, leaving the
    # assistant turn open (add_generation_prompt=True).
    prompt_text = tokenizer.apply_chat_template(
        [
            {'role': 'user', 'content': sample["prompt"]}
        ],
        tokenize=False, add_generation_prompt=True
    )

    # add_special_tokens=False: the chat template already inserted any
    # special tokens it needs.
    prompt_ids = tokenizer(prompt_text, add_special_tokens=False)["input_ids"]
    answer_ids = tokenizer(sample["output"], add_special_tokens=False)["input_ids"]

    # Prompt tokens are masked out of the loss; answer tokens (and the
    # final EOS) are supervised.
    input_ids = prompt_ids + answer_ids + [tokenizer.eos_token_id]
    labels = [IGNORE_INDEX] * len(prompt_ids) + answer_ids + [tokenizer.eos_token_id]

    return {
        "input_ids": input_ids,
        "attention_mask": [1] * len(input_ids),
        "labels": labels,
    }
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def print_function(example, tokenizer):
    """Dump one tokenized example (raw ids plus decoded text) for a quick
    sanity check of the preprocessing pipeline."""
    input_ids = example["input_ids"]
    label_ids = example["labels"]
    print(f"input_ids:\n{input_ids}")
    print(f"inputs:\n{tokenizer.decode(input_ids, skip_special_tokens=False)}")
    print(f"label_ids:\n{label_ids}")
    # Decode only the supervised positions (everything not masked out).
    supervised = [tok for tok in label_ids if tok != IGNORE_INDEX]
    print(f"labels:\n{tokenizer.decode(supervised, skip_special_tokens=False)}")
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def get_dataset(file_path, tokenizer, math_dataset=False):
    """Load a JSON data file and tokenize every sample for SFT training.

    Args:
        file_path: path to the json/jsonl file, loaded via
            `load_dataset('json', ...)` (samples land in the "train" split).
        tokenizer: HF tokenizer forwarded to the per-sample processor.
        math_dataset: if True, samples use the prompt/output schema and go
            through `process_math`; otherwise the input/output schema via
            `process`.

    Returns:
        list of tokenized examples (dicts with input_ids / attention_mask /
        labels).
    """
    dataset = load_dataset('json', data_files=file_path)
    train_dataset = dataset["train"]

    # Caching is disabled on purpose so edits to the processors take
    # effect immediately; num_proc=1 keeps the map deterministic.
    process_fn = process_math if math_dataset else process
    tokenized_dataset = train_dataset.map(
        process_fn, fn_kwargs={'tokenizer': tokenizer},
        num_proc=1, load_from_cache_file=False,
    )

    # Print one example so tokenization problems surface early.
    print_function(next(iter(tokenized_dataset)), tokenizer)
    print(f"len of dataset before filter: {len(tokenized_dataset)}")

    # NOTE(review): the original "filter" loop kept every item and only
    # spammed per-item debug prints; length-based filtering (e.g. dropping
    # samples longer than MAX_LENGTH to avoid OOM) can be re-enabled here.
    filtered_dataset = list(tokenized_dataset)

    print(f"len of dataset after filter: {len(filtered_dataset)}")
    return filtered_dataset
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def train():
    """Entry point: parse CLI args, load model/tokenizer, build the mixed
    QA+math dataset, and run supervised fine-tuning with HF Trainer."""
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments)
    )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    print("==========Model Args=========")
    print(model_args)
    print("==========Data Args=========")
    print(data_args)
    print("==========Training Args=========")
    print(training_args)

    # use_cache and gradient_checkpointing are mutually exclusive.
    # BUGFIX: the original assigned use_cache only inside
    # `if training_args.gradient_checkpointing:`, so the from_pretrained call
    # below raised NameError whenever checkpointing was disabled.
    use_cache = not training_args.gradient_checkpointing

    model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        _attn_implementation="flash_attention_2",
        use_cache=use_cache,
    ).float()

    # Fall back to the model path when no separate tokenizer path is given.
    if model_args.tokenizer_name_or_path is None:
        model_args.tokenizer_name_or_path = model_args.model_name_or_path
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.tokenizer_name_or_path, model_max_length=training_args.model_max_length
    )

    # Some base models ship without a pad token; reuse EOS for padding.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    dataset_qa = get_dataset(data_args.data_path, tokenizer)
    print(f"qa dataset: {len(dataset_qa)}")
    # TODO(review): hard-coded math-dataset path — move into DataArguments.
    dataset_math = get_dataset("/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/math_qwq_4524_selected_add_prompt_871_sht_new_prompt.json", tokenizer, True)
    print(f"math dataset: {len(dataset_math)}")
    dataset = dataset_qa + dataset_math
    print(f"final dataset: {len(dataset)}")

    # Pads input_ids/attention_mask with the tokenizer's pad token and
    # labels with -100 so padded positions are ignored by the loss.
    data_collator = DataCollatorForSeq2Seq(
        tokenizer=tokenizer,
        label_pad_token_id=-100,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        train_dataset=dataset,
    )
    trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
    trainer.save_model(training_args.output_dir)
    trainer.save_state()
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
if __name__ == "__main__":
    # Seed torch's RNG before any model/data work for reproducible runs.
    torch.manual_seed(42)
    train()
|
deep_search/sft/sft_logs/3-28.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/sft/sft_logs/4-1.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/sft/sft_logs/4-24-math_after_search.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
deep_search/sft/sft_logs/4-24-mixed_math.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|