Commit ·
fbfec74
1
Parent(s): 32bf280
improving parsing of responses for new ReAct agent
Browse files- langgraphagent.py +5 -27
- reactlanggraphagent.py +12 -4
- utils.py +55 -0
langgraphagent.py
CHANGED
|
@@ -21,7 +21,7 @@ from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
|
|
| 21 |
|
| 22 |
from custom_tools import get_custom_tools_list
|
| 23 |
from system_prompt import SYSTEM_PROMPT
|
| 24 |
-
from utils import cleanup_answer
|
| 25 |
import config
|
| 26 |
|
| 27 |
# Suppress BeautifulSoup GuessedAtParserWarning
|
|
@@ -252,36 +252,14 @@ class LangGraphAgent:
|
|
| 252 |
print(f"{'='*60}\n")
|
| 253 |
|
| 254 |
answer = response.get("answer")
|
| 255 |
-
if answer is None:
|
| 256 |
print("[WARNING] Agent completed but returned None as answer")
|
| 257 |
return "Error: No answer generated"
|
| 258 |
|
| 259 |
-
#
|
| 260 |
-
|
| 261 |
-
# If it's a dict, try to extract text field
|
| 262 |
-
if 'text' in answer:
|
| 263 |
-
answer = answer['text']
|
| 264 |
-
else:
|
| 265 |
-
answer = str(answer)
|
| 266 |
-
print(f"[WARNING] Answer was dict, extracted: {answer[:100]}")
|
| 267 |
-
elif isinstance(answer, list):
|
| 268 |
-
# If it's a list, extract text from each item
|
| 269 |
-
text_parts = []
|
| 270 |
-
for item in answer:
|
| 271 |
-
if isinstance(item, dict) and 'text' in item:
|
| 272 |
-
text_parts.append(item['text'])
|
| 273 |
-
else:
|
| 274 |
-
text_parts.append(str(item))
|
| 275 |
-
answer = " ".join(text_parts)
|
| 276 |
-
print(f"[WARNING] Answer was list, extracted: {answer[:100]}")
|
| 277 |
-
elif not isinstance(answer, str):
|
| 278 |
-
# Convert to string if it's any other type
|
| 279 |
-
answer = str(answer)
|
| 280 |
-
print(f"[WARNING] Answer was {type(answer)}, converted to string")
|
| 281 |
-
|
| 282 |
-
answer = answer.strip()
|
| 283 |
|
| 284 |
-
# Clean up the answer using utility function
|
| 285 |
answer = cleanup_answer(answer)
|
| 286 |
|
| 287 |
print(f"[FINAL ANSWER] {answer}")
|
|
|
|
| 21 |
|
| 22 |
from custom_tools import get_custom_tools_list
|
| 23 |
from system_prompt import SYSTEM_PROMPT
|
| 24 |
+
from utils import cleanup_answer, extract_text_from_content
|
| 25 |
import config
|
| 26 |
|
| 27 |
# Suppress BeautifulSoup GuessedAtParserWarning
|
|
|
|
| 252 |
print(f"{'='*60}\n")
|
| 253 |
|
| 254 |
answer = response.get("answer")
|
| 255 |
+
if not answer or answer is None:
|
| 256 |
print("[WARNING] Agent completed but returned None as answer")
|
| 257 |
return "Error: No answer generated"
|
| 258 |
|
| 259 |
+
# Use utility function to extract text from various content formats
|
| 260 |
+
answer = extract_text_from_content(answer)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 261 |
|
| 262 |
+
# Clean up the answer using utility function (includes stripping)
|
| 263 |
answer = cleanup_answer(answer)
|
| 264 |
|
| 265 |
print(f"[FINAL ANSWER] {answer}")
|
reactlanggraphagent.py
CHANGED
|
@@ -15,7 +15,7 @@ from langchain_core.messages import HumanMessage
|
|
| 15 |
|
| 16 |
from custom_tools import get_custom_tools_list
|
| 17 |
from system_prompt import SYSTEM_PROMPT
|
| 18 |
-
from utils import cleanup_answer
|
| 19 |
import config
|
| 20 |
|
| 21 |
# Suppress BeautifulSoup GuessedAtParserWarning
|
|
@@ -63,7 +63,7 @@ class ReActLangGraphAgent:
|
|
| 63 |
agent_graph = create_react_agent(
|
| 64 |
model=self.llm,
|
| 65 |
tools=self.tools,
|
| 66 |
-
|
| 67 |
)
|
| 68 |
|
| 69 |
return agent_graph
|
|
@@ -140,9 +140,17 @@ class ReActLangGraphAgent:
|
|
| 140 |
|
| 141 |
# Get the last message (the agent's final response)
|
| 142 |
last_message = messages[-1]
|
| 143 |
-
answer = last_message.content if hasattr(last_message, 'content') else str(last_message)
|
| 144 |
|
| 145 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 146 |
print("[WARNING] Agent completed but returned None as answer")
|
| 147 |
return "Error: No answer generated"
|
| 148 |
|
|
|
|
| 15 |
|
| 16 |
from custom_tools import get_custom_tools_list
|
| 17 |
from system_prompt import SYSTEM_PROMPT
|
| 18 |
+
from utils import cleanup_answer, extract_text_from_content
|
| 19 |
import config
|
| 20 |
|
| 21 |
# Suppress BeautifulSoup GuessedAtParserWarning
|
|
|
|
| 63 |
agent_graph = create_react_agent(
|
| 64 |
model=self.llm,
|
| 65 |
tools=self.tools,
|
| 66 |
+
prompt=SYSTEM_PROMPT # System prompt is added via the prompt parameter
|
| 67 |
)
|
| 68 |
|
| 69 |
return agent_graph
|
|
|
|
| 140 |
|
| 141 |
# Get the last message (the agent's final response)
|
| 142 |
last_message = messages[-1]
|
|
|
|
| 143 |
|
| 144 |
+
# Extract content from the message
|
| 145 |
+
if hasattr(last_message, 'content'):
|
| 146 |
+
content = last_message.content
|
| 147 |
+
else:
|
| 148 |
+
content = str(last_message)
|
| 149 |
+
|
| 150 |
+
# Use utility function to extract text from various content formats
|
| 151 |
+
answer = extract_text_from_content(content)
|
| 152 |
+
|
| 153 |
+
if not answer or answer is None:
|
| 154 |
print("[WARNING] Agent completed but returned None as answer")
|
| 155 |
return "Error: No answer generated"
|
| 156 |
|
utils.py
CHANGED
|
@@ -48,6 +48,61 @@ def retry_with_backoff(
|
|
| 48 |
return decorator
|
| 49 |
|
| 50 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
def cleanup_answer(answer: Any) -> str:
|
| 52 |
"""
|
| 53 |
Clean up the agent answer to ensure it's in plain text format.
|
|
|
|
| 48 |
return decorator
|
| 49 |
|
| 50 |
|
| 51 |
+
def extract_text_from_content(content: Any) -> str:
    """
    Extract plain text from various content formats returned by LLM agents.

    This function handles multiple content formats:
    - String: Returns as-is
    - Dict with 'text' field: Extracts the text value
    - List of content blocks: Extracts text from all blocks with type='text'
    - Other types: Converts to string

    Args:
        content: The content object from an LLM response (can be str, dict, list, etc.)

    Returns:
        str: Extracted plain text content
    """
    # Handle dict format (e.g., {'text': 'answer'})
    if isinstance(content, dict):
        if 'text' in content:
            return str(content['text'])
        # No 'text' key: fall back to the dict's string repr so nothing is lost
        print(f"[WARNING] Content was dict without 'text' field, converting to string")
        return str(content)

    # Handle list format (e.g., [{'type': 'text', 'text': 'answer'}])
    elif isinstance(content, list):
        text_parts = []
        for item in content:
            if isinstance(item, dict):
                # Look for items with type='text' and extract the 'text' field
                if item.get('type') == 'text':
                    text_parts.append(str(item.get('text', '')))
                # Fallback: if there's a 'text' field but no type, use it
                elif 'text' in item:
                    text_parts.append(str(item['text']))
            elif isinstance(item, str):
                text_parts.append(item)
            else:
                text_parts.append(str(item))

        result = ' '.join(text_parts)
        # Log only when extraction actually flattened structured content,
        # not for a trivial single-string list
        if len(content) > 1 or (len(content) == 1 and isinstance(content[0], dict)):
            print(f"[INFO] Extracted text from list with {len(content)} item(s)")
        return result

    # Handle string format (already plain text)
    elif isinstance(content, str):
        return content

    # Fallback for other types (int, None, message objects, ...)
    else:
        print(f"[WARNING] Content was {type(content)}, converting to string")
        return str(content)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
def cleanup_answer(answer: Any) -> str:
|
| 107 |
"""
|
| 108 |
Clean up the agent answer to ensure it's in plain text format.
|