Update README.md
Browse files
README.md
CHANGED
|
@@ -14,15 +14,15 @@ Here is the checkpoint used in the paper **AceSearcher: Bootstrapping Reasoning
|
|
| 14 |
## Model Usage
|
| 15 |
For question decomposition on QA tasks:
|
| 16 |
```python
|
| 17 |
-
prompt_plan_qa = """Please break down the question "{question}" into multiple specific sub-questions that address individual components of the original question.
|
| 18 |
-
Mark each sub-question with ### at the beginning. If you need to refer to answers from earlier sub-questions, use #1, #2, etc., to indicate the corresponding answers.
|
| 19 |
-
Decomposed Question:"""
|
| 20 |
-
|
| 21 |
from vllm import LLM, SamplingParams
|
| 22 |
model_path = "AceSearcher/AceSearcher-14B"
|
| 23 |
|
| 24 |
llm = LLM(model=model_path, tensor_parallel_size=1, gpu_memory_utilization=0.85, trust_remote_code=True)
|
| 25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
prompt_qa = prompt_plan_qa.replace("{question}", question)
|
| 27 |
|
| 28 |
prompt = [
|
|
|
|
| 14 |
## Model Usage
|
| 15 |
For question decomposition on QA tasks:
|
| 16 |
```python
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
from vllm import LLM, SamplingParams
|
| 18 |
model_path = "AceSearcher/AceSearcher-14B"
|
| 19 |
|
| 20 |
llm = LLM(model=model_path, tensor_parallel_size=1, gpu_memory_utilization=0.85, trust_remote_code=True)
|
| 21 |
|
| 22 |
+
prompt_plan_qa = """Please break down the question "{question}" into multiple specific sub-questions that address individual components of the original question.
|
| 23 |
+
Mark each sub-question with ### at the beginning. If you need to refer to answers from earlier sub-questions, use #1, #2, etc., to indicate the corresponding answers.
|
| 24 |
+
Decomposed Question:"""
|
| 25 |
+
|
| 26 |
prompt_qa = prompt_plan_qa.replace("{question}", question)
|
| 27 |
|
| 28 |
prompt = [
|