Update agent.py
Browse files
agent.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
|
| 2 |
|
| 3 |
-
|
| 4 |
from typing import Callable, Dict, List, Any
|
| 5 |
import time
|
| 6 |
import json
|
|
@@ -42,8 +42,8 @@ def format_gaia_response(model_type, last_observation, question_out):
|
|
| 42 |
get_llm_response = select_model(model_type)
|
| 43 |
|
| 44 |
# Process Gaia
|
| 45 |
-
|
| 46 |
-
|
| 47 |
|
| 48 |
gaia_prompt = (
|
| 49 |
f"{final_sys_prompt}\n\n"
|
|
@@ -52,7 +52,7 @@ def format_gaia_response(model_type, last_observation, question_out):
|
|
| 52 |
"Please review user questions and the last observation and respond with the correct answer, in the correct format. No extra text, just the answer."
|
| 53 |
)
|
| 54 |
|
| 55 |
-
final_answer_out = get_llm_response(
|
| 56 |
|
| 57 |
return final_answer_out
|
| 58 |
|
|
@@ -65,11 +65,11 @@ class ImprovedAgent:
|
|
| 65 |
|
| 66 |
|
| 67 |
# Load system prompts from .txt files
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
|
| 74 |
def load_prompt(self, filepath: str) -> str:
|
| 75 |
with open(filepath, "r") as f:
|
|
@@ -122,7 +122,7 @@ History: {json.dumps(self.history, indent=2)}
|
|
| 122 |
planning_input = f"User Query: {query}"
|
| 123 |
print("-----Stage Plan-----")
|
| 124 |
|
| 125 |
-
plan_response = self.get_llm_response(self.
|
| 126 |
print("-----Plan Text-----")
|
| 127 |
print(plan_response)
|
| 128 |
print("-------------------")
|
|
@@ -140,7 +140,7 @@ History: {json.dumps(self.history, indent=2)}
|
|
| 140 |
# Step 2: Thought Agent
|
| 141 |
print("-----Stage Thought-----")
|
| 142 |
|
| 143 |
-
thought_response = self.get_llm_response(self.
|
| 144 |
print(thought_response)
|
| 145 |
parsed_thought = self.parse_json_response(thought_response)
|
| 146 |
print("-----Thought Parsed-----")
|
|
@@ -217,7 +217,7 @@ History: {json.dumps(self.history, indent=2)}
|
|
| 217 |
Tool Output: {observation}
|
| 218 |
"""
|
| 219 |
print("-----Stage Observe-----")
|
| 220 |
-
observation_response_text = self.get_llm_response(self.
|
| 221 |
|
| 222 |
print("-----Observation Parsed-----")
|
| 223 |
parsed_observation = self.parse_json_response(observation_response_text)
|
|
|
|
| 1 |
|
| 2 |
|
| 3 |
+
from langchain_core.output_parsers import PydanticOutputParser
|
| 4 |
from typing import Callable, Dict, List, Any
|
| 5 |
import time
|
| 6 |
import json
|
|
|
|
| 42 |
get_llm_response = select_model(model_type)
|
| 43 |
|
| 44 |
# Process Gaia
|
| 45 |
+
with open(base_dir+"/system_prompt_final.txt", "r") as f:
|
| 46 |
+
final_sys_prompt = f.read()
|
| 47 |
|
| 48 |
gaia_prompt = (
|
| 49 |
f"{final_sys_prompt}\n\n"
|
|
|
|
| 52 |
"Please review user questions and the last observation and respond with the correct answer, in the correct format. No extra text, just the answer."
|
| 53 |
)
|
| 54 |
|
| 55 |
+
final_answer_out = get_llm_response(final_sys_prompt, gaia_prompt, reasoning_format = 'hidden')
|
| 56 |
|
| 57 |
return final_answer_out
|
| 58 |
|
|
|
|
| 65 |
|
| 66 |
|
| 67 |
# Load system prompts from .txt files
|
| 68 |
+
self.system_prompt_plan = self.load_prompt(base_dir+"/system_prompt_planning.txt")
|
| 69 |
+
self.system_prompt_thought = self.load_prompt(base_dir+"/system_prompt_thought.txt")
|
| 70 |
+
self.system_prompt_action = self.load_prompt(base_dir+"/system_prompt_action.txt")
|
| 71 |
+
self.system_prompt_observe = self.load_prompt(base_dir+"/system_prompt_observe.txt")
|
| 72 |
+
|
| 73 |
|
| 74 |
def load_prompt(self, filepath: str) -> str:
|
| 75 |
with open(filepath, "r") as f:
|
|
|
|
| 122 |
planning_input = f"User Query: {query}"
|
| 123 |
print("-----Stage Plan-----")
|
| 124 |
|
| 125 |
+
plan_response = self.get_llm_response(self.system_prompt_plan, planning_input)
|
| 126 |
print("-----Plan Text-----")
|
| 127 |
print(plan_response)
|
| 128 |
print("-------------------")
|
|
|
|
| 140 |
# Step 2: Thought Agent
|
| 141 |
print("-----Stage Thought-----")
|
| 142 |
|
| 143 |
+
thought_response = self.get_llm_response(self.system_prompt_thought, current_input)
|
| 144 |
print(thought_response)
|
| 145 |
parsed_thought = self.parse_json_response(thought_response)
|
| 146 |
print("-----Thought Parsed-----")
|
|
|
|
| 217 |
Tool Output: {observation}
|
| 218 |
"""
|
| 219 |
print("-----Stage Observe-----")
|
| 220 |
+
observation_response_text = self.get_llm_response(self.system_prompt_observe, observation_input)
|
| 221 |
|
| 222 |
print("-----Observation Parsed-----")
|
| 223 |
parsed_observation = self.parse_json_response(observation_response_text)
|