Spaces:
Sleeping
Sleeping
| """ | |
| General-Purpose ReAct Agent with LangGraph | |
| Uses the prebuilt create_react_agent which automatically handles: | |
| - The Think β Act β Observe loop | |
| - Tool calling and response routing | |
| - Deciding when to stop | |
| Much simpler than manually defining the graph! | |
| """ | |
| import os | |
| from typing import TypedDict, Optional | |
| from dotenv import load_dotenv | |
| from langgraph.prebuilt import create_react_agent | |
| from langchain_openai import ChatOpenAI | |
| from langchain_core.messages import HumanMessage, SystemMessage | |
| import yaml | |
| # Import all tools from the modular tools package | |
| from tools import ALL_TOOLS, MODEL_NAME | |
| # ============== SYSTEM PROMPT ============== | |
| load_dotenv() | |
| _HERE = os.path.dirname(os.path.abspath(__file__)) | |
| with open(os.path.join(_HERE, "prompts.yaml"), "r", encoding="utf-8") as f: | |
| PROMPTS = yaml.safe_load(f) | |
| SYSTEM_PROMPT = PROMPTS["SYSTEM_PROMPT"] | |
# ============== CREATE THE AGENT ==============
def create_agent():
    """Create the ReAct agent using LangGraph's prebuilt factory.

    Returns:
        A compiled LangGraph runnable that drives the Think -> Act -> Observe
        loop over ``ALL_TOOLS``, with ``SYSTEM_PROMPT`` prepended to every
        model call.
    """
    llm = ChatOpenAI(
        model=MODEL_NAME,
        temperature=0,  # deterministic output for benchmark-style questions
    )
    # create_react_agent wires up tool routing and the stop condition for us.
    agent = create_react_agent(
        model=llm,
        tools=ALL_TOOLS,
        prompt=SYSTEM_PROMPT,  # adds the system prompt to every call
    )
    return agent


# Create global agent instance shared by run_agent / run_agent_verbose
agent = create_agent()
# ============== MAIN INTERFACE ==============
def run_agent(question: str, task_id: str = "", file_name: str = "", local_file_path: Optional[str] = None) -> str:
    """
    Run the ReAct agent on a question.

    Args:
        question: The question to answer
        task_id: Optional GAIA task ID (for file downloads)
        file_name: Optional filename hint
        local_file_path: Optional local path to a pre-downloaded file

    Returns:
        The agent's final answer, or an ``"Agent error: ..."`` string if the
        agent run raised.
    """
    # Build the user message, appending any task/file context as bracketed
    # hints the agent can act on.
    user_message = question
    if task_id:
        user_message += f"\n\n[Task ID: {task_id}]"
    if file_name:
        user_message += f"\n[Attached file: {file_name}]"
    if local_file_path:
        user_message += f"\n[File already downloaded to: {local_file_path}]"
        user_message += "\n[Use read_file tool with this path to analyze the file]"

    # Run agent; this is a top-level boundary, so surface any failure as a
    # string rather than raising to the caller.
    try:
        result = agent.invoke({
            "messages": [HumanMessage(content=user_message)]
        })
        # The final answer is the content of the last message in the state.
        return result["messages"][-1].content
    except Exception as e:
        return f"Agent error: {str(e)}"
def run_agent_verbose(question: str, task_id: str = "", file_name: str = "", local_file_path: Optional[str] = None) -> str:
    """Run the agent with verbose output showing each step.

    Streams the graph exactly once, printing tool calls, tool results, and AI
    reasoning as they arrive, and captures the final answer from the stream
    itself. (The previous version re-ran ``agent.invoke`` after streaming,
    executing the entire — expensive, non-deterministic — agent run twice.)

    Args:
        question: The question to answer
        task_id: Optional GAIA task ID (for file downloads)
        file_name: Optional filename hint
        local_file_path: Optional local path to a pre-downloaded file

    Returns:
        The agent's final answer, or an ``"Error: ..."`` string on failure.
    """
    # Same context-building scheme as run_agent.
    user_message = question
    if task_id:
        user_message += f"\n\n[Task ID: {task_id}]"
    if file_name:
        user_message += f"\n[Attached file: {file_name}]"
    if local_file_path:
        user_message += f"\n[File already downloaded to: {local_file_path}]"
        user_message += "\n[Use read_file tool with this path to analyze the file]"

    print("\n" + "="*70)
    print("🤖 ReAct Agent - Verbose Mode")
    print("="*70)
    print(f"\n📝 Question: {question[:200]}{'...' if len(question) > 200 else ''}")
    if local_file_path:
        print(f"📁 File: {local_file_path}")
    print("\n" + "-"*70)

    try:
        answer = ""
        step_count = 0
        # Stream through steps; each item maps a graph node name to its output.
        for step in agent.stream({"messages": [HumanMessage(content=user_message)]}):
            step_count += 1
            for node_name, node_output in step.items():
                print(f"\n📍 Step {step_count} - {node_name}")
                print("-"*40)
                if "messages" not in node_output:
                    continue
                for msg in node_output["messages"]:
                    msg_type = type(msg).__name__
                    # Show tool calls (truncated args for readability)
                    if hasattr(msg, "tool_calls") and msg.tool_calls:
                        print("🔧 Tool calls requested:")
                        for tc in msg.tool_calls:
                            args_str = str(tc.get('args', {}))[:300]
                            print(f"   → {tc['name']}({args_str}{'...' if len(str(tc.get('args', {}))) > 300 else ''})")
                    # Show tool results
                    elif msg_type == "ToolMessage":
                        content = str(msg.content)[:300]
                        print(f"📄 Tool result: {content}{'...' if len(str(msg.content)) > 300 else ''}")
                    # Show AI reasoning; an AIMessage with content and no tool
                    # calls is (ultimately) the final answer, so remember it.
                    elif hasattr(msg, "content") and msg.content and msg_type == "AIMessage":
                        content = msg.content[:400]
                        print(f"💭 AI: {content}{'...' if len(msg.content) > 400 else ''}")
                        answer = msg.content

        print("\n" + "="*70)
        print(f"✅ Final Answer: {answer}")
        print("="*70 + "\n")
        return answer
    except Exception as e:
        import traceback
        print(f"\n❌ Error: {str(e)}")
        traceback.print_exc()
        return f"Error: {str(e)}"
# ============== TEST ==============
if __name__ == "__main__":
    print("\n" + "="*70)
    print("Testing ReAct Agent (Prebuilt)")
    print("="*70)

    # Show available tools so a misconfigured tools package is obvious.
    print(f"\n📦 Loaded {len(ALL_TOOLS)} tools:")
    for tool in ALL_TOOLS:
        print(f"   - {tool.name}")

    # Smoke-test the agent end to end with verbose output.
    test_question = "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia."
    print(f"\n🧪 Test question: {test_question}")
    run_agent_verbose(test_question)