Commit
·
054483f
1 Parent(s): 1b32465
removing unnecessary logging parts, refinement of simple execution, adding previous final answer check
Browse files
- src/nodes.py +17 -46
- src/schemas.py +1 -1
- src/state.py +1 -0
- src/workflow_test.ipynb +0 -0
src/nodes.py
CHANGED
|
@@ -13,7 +13,7 @@ from prompts.prompts import (
|
|
| 13 |
CRITIC_PROMPT,
|
| 14 |
)
|
| 15 |
|
| 16 |
-
from config import llm_reasoning, TOOLS, planner_llm, llm_with_tools, llm_deterministic, llm_criticist
|
| 17 |
from schemas import PlannerPlan, ComplexityLevel, CritiqueFeedback, ExecutionReport, ToolExecution
|
| 18 |
|
| 19 |
from utils.utils import (
|
|
@@ -38,16 +38,14 @@ def _build_planner_prompt(state: AgentState, extra_context: Optional[str] = None
|
|
| 38 |
|
| 39 |
def query_input(state : AgentState) -> AgentState:
|
| 40 |
log_stage("USER QUERY", icon="💡")
|
| 41 |
-
#print("=== USER QUERY TRANSFERED TO AGENT ===")
|
| 42 |
|
| 43 |
files = state.get("files", [])
|
| 44 |
if files:
|
| 45 |
-
print(f"Processing {len(files)} files:")
|
| 46 |
log_stage("FILE PREPARATION", subtitle=f"Processing {len(files)} file(s)", icon="📁")
|
| 47 |
file_info = preprocess_files(files)
|
| 48 |
|
| 49 |
for file_path, info in file_info.items():
|
| 50 |
-
print(f" - {file_path}: {info['type']} ({info['size']} bytes) -> {info['suggested_tool']}")
|
| 51 |
log_key_values(
|
| 52 |
[
|
| 53 |
("path", file_path),
|
|
@@ -103,13 +101,7 @@ def planner(state : AgentState) -> AgentState:
|
|
| 103 |
|
| 104 |
def agent(state: AgentState) -> AgentState:
|
| 105 |
|
| 106 |
-
|
| 107 |
-
sys_msg = SystemMessage(
|
| 108 |
-
content=SYSTEM_EXECUTOR_PROMPT.strip().format(
|
| 109 |
-
plan=json.dumps(state["plan"], indent=2)
|
| 110 |
-
)
|
| 111 |
-
)
|
| 112 |
-
"""
|
| 113 |
current_step = state.get("current_step", 0)
|
| 114 |
reasoning_done = state.get("reasoning_done", False)
|
| 115 |
plan: Optional[PlannerPlan] = state.get("plan")
|
|
@@ -117,17 +109,6 @@ def agent(state: AgentState) -> AgentState:
|
|
| 117 |
|
| 118 |
#steps = state["plan"].steps
|
| 119 |
|
| 120 |
-
"""
|
| 121 |
-
print(f"=== AGENT DEBUG ===")
|
| 122 |
-
print(f"Current step: {current_step}")
|
| 123 |
-
print(f"Reasoning done: {reasoning_done}")
|
| 124 |
-
print(f"Plan exists: {plan is not None}")
|
| 125 |
-
print(f"Total steps in plan: {len(plan.steps) if plan else 'No plan'}")
|
| 126 |
-
|
| 127 |
-
if not plan or not hasattr(plan, 'steps') or not plan.steps:
|
| 128 |
-
print("ERROR: No valid plan found!")
|
| 129 |
-
"""
|
| 130 |
-
|
| 131 |
if not plan or not hasattr(plan, 'steps'):
|
| 132 |
log_stage("PLAN VALIDATION", subtitle="Planner returned no actionable steps", icon="⚠️")
|
| 133 |
warning = AIMessage(content="No valid plan available. <FINAL_ANSWER>")
|
|
@@ -157,7 +138,6 @@ def agent(state: AgentState) -> AgentState:
|
|
| 157 |
}
|
| 158 |
|
| 159 |
current_step_info = steps[current_step]
|
| 160 |
-
#print(f"Executing step {current_step + 1}: {current_step_info.description}")
|
| 161 |
|
| 162 |
log_stage(
|
| 163 |
"EXECUTION",
|
|
@@ -201,18 +181,9 @@ def agent(state: AgentState) -> AgentState:
|
|
| 201 |
|
| 202 |
if not reasoning_done:
|
| 203 |
|
| 204 |
-
instruction = HumanMessage(
|
| 205 |
-
content=(
|
| 206 |
-
"Provide reasoning for this step inside <REASONING>...</REASONING>. "
|
| 207 |
-
"Do not call any tools yet."
|
| 208 |
-
)
|
| 209 |
-
)
|
| 210 |
-
stack = [system_message] + state["messages"] + [instruction]
|
| 211 |
-
reasoning_response = llm_reasoning.invoke(stack) #default llm
|
| 212 |
log_stage("REASONING", subtitle=f"{current_step_info.id}", icon="🧠")
|
| 213 |
-
print(reasoning_response.content)
|
| 214 |
|
| 215 |
-
# ✅ ДОБАВЛЕНО: Специальный контекст для файлов
|
| 216 |
file_context = ""
|
| 217 |
file_contents = state.get("file_contents", {})
|
| 218 |
if file_contents:
|
|
@@ -242,8 +213,8 @@ def agent(state: AgentState) -> AgentState:
|
|
| 242 |
stack = [sys_msg] + state["messages"]
|
| 243 |
|
| 244 |
step = llm_reasoning.invoke(stack)
|
| 245 |
-
print("=== REASONING STEP ===")
|
| 246 |
-
print(step.content)
|
| 247 |
|
| 248 |
return {
|
| 249 |
"messages" : state["messages"] + [step],
|
|
@@ -266,7 +237,7 @@ def agent(state: AgentState) -> AgentState:
|
|
| 266 |
# Используем модель С инструментами для выполнения
|
| 267 |
step = llm_with_tools.invoke(stack)
|
| 268 |
print("=== TOOL EXECUTION ===")
|
| 269 |
-
print(step)
|
| 270 |
print(f"Tool calls: {step.tool_calls}")
|
| 271 |
|
| 272 |
return {
|
|
@@ -278,7 +249,7 @@ def agent(state: AgentState) -> AgentState:
|
|
| 278 |
def should_continue(state : AgentState) -> bool:
|
| 279 |
|
| 280 |
last_message = state["messages"][-1]
|
| 281 |
-
print(f"=== LAST MESSAGE WAS: {last_message} ===")
|
| 282 |
reasoning_done = state.get("reasoning_done", False)
|
| 283 |
plan = state.get("plan", None)
|
| 284 |
current_step = state.get("current_step", 0)
|
|
@@ -401,7 +372,7 @@ def enhanced_finalizer(state: AgentState) -> AgentState:
|
|
| 401 |
|
| 402 |
# Format final answer for user
|
| 403 |
formatted_answer = format_final_answer(execution_report, state.get('complexity_assessment', {}))
|
| 404 |
-
print(execution_report)
|
| 405 |
return {
|
| 406 |
"execution_report": execution_report,
|
| 407 |
"final_answer": formatted_answer
|
|
@@ -422,7 +393,7 @@ def simple_executor(state: AgentState) -> AgentState:
|
|
| 422 |
Provide a clear, concise answer.
|
| 423 |
"""
|
| 424 |
|
| 425 |
-
response =
|
| 426 |
SystemMessage(content=simple_prompt),
|
| 427 |
HumanMessage(content=state['query'])
|
| 428 |
])
|
|
@@ -574,11 +545,11 @@ def replanner_old(state: AgentState) -> AgentState:
|
|
| 574 |
isinstance(msg, HumanMessage)):
|
| 575 |
essential_messages.append(msg)
|
| 576 |
|
| 577 |
-
print(f"Cleaned message history: {len(current_messages)} -> {len(essential_messages)} messages")
|
| 578 |
-
print("=== ESSENTIAL MESSAGES ===")
|
| 579 |
-
print(essential_messages)
|
| 580 |
-
print("=== AGENT STATE ===")
|
| 581 |
-
print(state["messages"])
|
| 582 |
|
| 583 |
return {
|
| 584 |
"plan": revised_plan,
|
|
@@ -621,7 +592,7 @@ def replanner(state: AgentState) -> AgentState:
|
|
| 621 |
|
| 622 |
# ИСПРАВЛЕНИЕ: Сохраняем важные результаты инструментов
|
| 623 |
current_messages = state.get("messages", [])
|
| 624 |
-
|
| 625 |
# Находим полезные результаты инструментов
|
| 626 |
preserved_messages = []
|
| 627 |
tool_results = {}
|
|
@@ -660,7 +631,7 @@ def replanner(state: AgentState) -> AgentState:
|
|
| 660 |
preserved_messages.append(msg)
|
| 661 |
|
| 662 |
print(f"Preserved {len(tool_results)} tool results")
|
| 663 |
-
print(f"Cleaned message history: {len(current_messages)} -> {len(preserved_messages)} messages")
|
| 664 |
|
| 665 |
# Добавляем контекст о доступных результатах
|
| 666 |
if tool_results:
|
|
|
|
| 13 |
CRITIC_PROMPT,
|
| 14 |
)
|
| 15 |
|
| 16 |
+
from config import llm_reasoning, TOOLS, planner_llm, llm_with_tools, llm_deterministic, llm_criticist, llm_simple_executor, llm_simple_with_tools
|
| 17 |
from schemas import PlannerPlan, ComplexityLevel, CritiqueFeedback, ExecutionReport, ToolExecution
|
| 18 |
|
| 19 |
from utils.utils import (
|
|
|
|
| 38 |
|
| 39 |
def query_input(state : AgentState) -> AgentState:
|
| 40 |
log_stage("USER QUERY", icon="💡")
|
|
|
|
| 41 |
|
| 42 |
files = state.get("files", [])
|
| 43 |
if files:
|
|
|
|
| 44 |
log_stage("FILE PREPARATION", subtitle=f"Processing {len(files)} file(s)", icon="📁")
|
| 45 |
file_info = preprocess_files(files)
|
| 46 |
|
| 47 |
for file_path, info in file_info.items():
|
| 48 |
+
#print(f" - {file_path}: {info['type']} ({info['size']} bytes) -> {info['suggested_tool']}")
|
| 49 |
log_key_values(
|
| 50 |
[
|
| 51 |
("path", file_path),
|
|
|
|
| 101 |
|
| 102 |
def agent(state: AgentState) -> AgentState:
|
| 103 |
|
| 104 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
current_step = state.get("current_step", 0)
|
| 106 |
reasoning_done = state.get("reasoning_done", False)
|
| 107 |
plan: Optional[PlannerPlan] = state.get("plan")
|
|
|
|
| 109 |
|
| 110 |
#steps = state["plan"].steps
|
| 111 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
if not plan or not hasattr(plan, 'steps'):
|
| 113 |
log_stage("PLAN VALIDATION", subtitle="Planner returned no actionable steps", icon="⚠️")
|
| 114 |
warning = AIMessage(content="No valid plan available. <FINAL_ANSWER>")
|
|
|
|
| 138 |
}
|
| 139 |
|
| 140 |
current_step_info = steps[current_step]
|
|
|
|
| 141 |
|
| 142 |
log_stage(
|
| 143 |
"EXECUTION",
|
|
|
|
| 181 |
|
| 182 |
if not reasoning_done:
|
| 183 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 184 |
log_stage("REASONING", subtitle=f"{current_step_info.id}", icon="🧠")
|
| 185 |
+
#print(reasoning_response.content)
|
| 186 |
|
|
|
|
| 187 |
file_context = ""
|
| 188 |
file_contents = state.get("file_contents", {})
|
| 189 |
if file_contents:
|
|
|
|
| 213 |
stack = [sys_msg] + state["messages"]
|
| 214 |
|
| 215 |
step = llm_reasoning.invoke(stack)
|
| 216 |
+
#print("=== REASONING STEP ===")
|
| 217 |
+
#print(step.content)
|
| 218 |
|
| 219 |
return {
|
| 220 |
"messages" : state["messages"] + [step],
|
|
|
|
| 237 |
# Используем модель С инструментами для выполнения
|
| 238 |
step = llm_with_tools.invoke(stack)
|
| 239 |
print("=== TOOL EXECUTION ===")
|
| 240 |
+
#print(step)
|
| 241 |
print(f"Tool calls: {step.tool_calls}")
|
| 242 |
|
| 243 |
return {
|
|
|
|
| 249 |
def should_continue(state : AgentState) -> bool:
|
| 250 |
|
| 251 |
last_message = state["messages"][-1]
|
| 252 |
+
#print(f"=== LAST MESSAGE WAS: {last_message} ===")
|
| 253 |
reasoning_done = state.get("reasoning_done", False)
|
| 254 |
plan = state.get("plan", None)
|
| 255 |
current_step = state.get("current_step", 0)
|
|
|
|
| 372 |
|
| 373 |
# Format final answer for user
|
| 374 |
formatted_answer = format_final_answer(execution_report, state.get('complexity_assessment', {}))
|
| 375 |
+
#print(execution_report)
|
| 376 |
return {
|
| 377 |
"execution_report": execution_report,
|
| 378 |
"final_answer": formatted_answer
|
|
|
|
| 393 |
Provide a clear, concise answer.
|
| 394 |
"""
|
| 395 |
|
| 396 |
+
response = llm_simple_with_tools.invoke([
|
| 397 |
SystemMessage(content=simple_prompt),
|
| 398 |
HumanMessage(content=state['query'])
|
| 399 |
])
|
|
|
|
| 545 |
isinstance(msg, HumanMessage)):
|
| 546 |
essential_messages.append(msg)
|
| 547 |
|
| 548 |
+
#print(f"Cleaned message history: {len(current_messages)} -> {len(essential_messages)} messages")
|
| 549 |
+
#print("=== ESSENTIAL MESSAGES ===")
|
| 550 |
+
#print(essential_messages)
|
| 551 |
+
#print("=== AGENT STATE ===")
|
| 552 |
+
#print(state["messages"])
|
| 553 |
|
| 554 |
return {
|
| 555 |
"plan": revised_plan,
|
|
|
|
| 592 |
|
| 593 |
# ИСПРАВЛЕНИЕ: Сохраняем важные результаты инструментов
|
| 594 |
current_messages = state.get("messages", [])
|
| 595 |
+
state["previous_final_answer"] = state.get("final_answer", "")
|
| 596 |
# Находим полезные результаты инструментов
|
| 597 |
preserved_messages = []
|
| 598 |
tool_results = {}
|
|
|
|
| 631 |
preserved_messages.append(msg)
|
| 632 |
|
| 633 |
print(f"Preserved {len(tool_results)} tool results")
|
| 634 |
+
#print(f"Cleaned message history: {len(current_messages)} -> {len(preserved_messages)} messages")
|
| 635 |
|
| 636 |
# Добавляем контекст о доступных результатах
|
| 637 |
if tool_results:
|
src/schemas.py
CHANGED
|
@@ -67,7 +67,7 @@ class ExecutionReport(BaseModel):
|
|
| 67 |
assumptions_made: List[str] = Field(default_factory=list, description="Any assumptions made during execution")
|
| 68 |
confidence_level: Literal["low", "medium", "high"] = Field(description="Confidence in the answer")
|
| 69 |
limitations: List[str] = Field(default_factory=list, description="Known limitations or caveats")
|
| 70 |
-
final_answer: str = Field(description="
|
| 71 |
|
| 72 |
class Config:
|
| 73 |
extra = "forbid"
|
|
|
|
| 67 |
assumptions_made: List[str] = Field(default_factory=list, description="Any assumptions made during execution")
|
| 68 |
confidence_level: Literal["low", "medium", "high"] = Field(description="Confidence in the answer")
|
| 69 |
limitations: List[str] = Field(default_factory=list, description="Known limitations or caveats")
|
| 70 |
+
final_answer: str = Field(description="NO OTHER WORDS EXCEPT THESE RULES: Formatting rules: 1. If the question asks for a *first name*, output the first given name only.\n 2. If the answer is purely numeric, output digits only (no commas, units, words) as a string. \n 3. Otherwise capitalize the first character of your answer **unless** doing so would change the original spelling of text you are quoting verbatim")
|
| 71 |
|
| 72 |
class Config:
|
| 73 |
extra = "forbid"
|
src/state.py
CHANGED
|
@@ -20,4 +20,5 @@ class AgentState(MessagesState):
|
|
| 20 |
max_iterations: int
|
| 21 |
execution_report : ExecutionReport
|
| 22 |
previous_tool_results: Dict[str, str] # НОВОЕ ПОЛЕ для сохранения результатов
|
|
|
|
| 23 |
|
|
|
|
| 20 |
max_iterations: int
|
| 21 |
execution_report : ExecutionReport
|
| 22 |
previous_tool_results: Dict[str, str] # НОВОЕ ПОЛЕ для сохранения результатов
|
| 23 |
+
previous_final_answer: str # НОВОЕ ПОЛЕ для сохранения предыдущих окончательных ответов
|
| 24 |
|
src/workflow_test.ipynb
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|