# cas_task_1 / agent.py — AndreaBagno, initial commit 3b298be
"""
Generative AI Agent with Tool Calling Capabilities
This module provides an intelligent agent that can use multiple tools (Wikipedia, Tavily)
to answer user queries with up-to-date and accurate information.
"""
import os
import yaml
import logging
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.tools import Tool
from tools.wikipedia_tool import WikipediaTool
from langchain_tavily import TavilySearch
from langgraph.prebuilt import create_react_agent
# Configure logging: INFO level on the root logger, plus a module-scoped
# logger used throughout this file.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class GenerativeAIAgent:
    """
    An intelligent agent that uses LangGraph's ReAct pattern to answer queries.
    The agent can dynamically select and use tools (Wikipedia, Tavily Search) based on
    the user's query to provide accurate and up-to-date information.
    Attributes:
        llm: The language model (ChatOpenAI)
        tools: List of available tools for the agent
        agent_executor: The LangGraph ReAct agent executor
    """
    def __init__(self, config_path: str = "config.yaml"):
        """
        Initialize the GenerativeAIAgent with configuration and tools.
        Args:
            config_path: Path to the YAML configuration file
        """
        logger.info("Initializing GenerativeAIAgent...")
        # Load environment variables (API keys for OpenAI / Tavily)
        load_dotenv(dotenv_path="local/.env")
        # Load configuration
        self.config = self._load_config(config_path)
        # Initialize tools
        self.wikipedia_tool = WikipediaTool(config_path)
        self.tavily_search = TavilySearch(max_results=5)
        # Define available tools
        self.tools = self._initialize_tools()
        # Initialize language model
        self.llm = self._initialize_llm()
        # Create ReAct agent executor
        self.agent_executor = create_react_agent(self.llm, self.tools)
        logger.info("Agent initialized successfully with %d tools", len(self.tools))
    def _load_config(self, config_path: str) -> dict:
        """
        Load configuration from YAML file.
        Returns an empty dict when the file is empty so downstream
        ``self.config.get(...)`` calls never operate on ``None``.
        Args:
            config_path: Path to the YAML configuration file
        Returns:
            dict: The parsed configuration (``{}`` for an empty file)
        Raises:
            FileNotFoundError: If the config file does not exist
            yaml.YAMLError: If the file is not valid YAML
        """
        try:
            # Explicit encoding avoids platform-dependent default codecs.
            with open(config_path, "r", encoding="utf-8") as file:
                config = yaml.safe_load(file)
            logger.info("Configuration loaded from %s", config_path)
            # yaml.safe_load returns None for an empty file; normalize to {}.
            return config or {}
        except FileNotFoundError:
            logger.error("Config file not found: %s", config_path)
            raise
        except yaml.YAMLError as e:
            logger.error("Error parsing config file: %s", e)
            raise
    def _initialize_tools(self) -> list:
        """Initialize and return the list of tools available to the agent."""
        tools = [
            Tool(
                name="Wikipedia",
                description=(
                    "Search Wikipedia for factual, encyclopedic information. "
                    "Best for: historical facts, scientific concepts, biographies, "
                    "general knowledge. Input should be a clear search query."
                ),
                func=self.wikipedia_tool.search
            ),
            Tool(
                name="Tavily",
                description=(
                    "Search the web for current information and latest news. "
                    "Best for: recent events, breaking news, current trends, "
                    "real-time data. Input should be a search query."
                ),
                func=self.tavily_search.invoke
            )
        ]
        logger.info("Initialized tools: %s", [tool.name for tool in tools])
        return tools
    def _initialize_llm(self) -> "ChatOpenAI":
        """
        Initialize the language model from the ``openai`` section of the config.
        Falls back to model "gpt-5", temperature 0.7 and 1000 max tokens when
        keys are missing. The return annotation is a string forward reference
        so the class can be defined without evaluating ChatOpenAI eagerly
        (e.g. when the import is deferred or mocked in tests).
        Returns:
            ChatOpenAI: The configured chat model
        """
        model_config = self.config.get("openai", {})
        model_name = model_config.get("model", "gpt-5")
        llm = ChatOpenAI(
            model=model_name,
            temperature=model_config.get("temperature", 0.7),
            max_tokens=model_config.get("max_tokens", 1000),
            api_key=os.getenv("OPENAI_API_KEY")
        )
        # Log the effective model, including the fallback default
        # (the old code logged None when the config omitted "model").
        logger.info("LLM initialized: %s", model_name)
        return llm
    def generate_response(self, user_input: str) -> str:
        """
        Generate a response to the user's input using the agent.
        The agent will automatically select and use appropriate tools based on the query,
        following the ReAct (Reasoning + Acting) pattern.
        Args:
            user_input: The user's question or query
        Returns:
            str: The agent's response (or a user-friendly error message)
        """
        if not user_input or not user_input.strip():
            logger.warning("Empty input received")
            return "Please provide a valid question or query."
        try:
            # Truncate long queries in the log to keep lines readable.
            logger.info("Processing query: %s", user_input[:50] + "..." if len(user_input) > 50 else user_input)
            # Get system prompt from config
            system_prompt = self.config.get("app", {}).get(
                "system_prompt",
                "You are a helpful AI assistant with access to Wikipedia and web search tools."
            )
            # Prepare messages for the agent
            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_input}
            ]
            # Invoke the agent executor
            response = self.agent_executor.invoke({"messages": messages})
            # Extract the final answer from the response
            final_answer = self._extract_final_answer(response)
            logger.info("Response generated successfully")
            return final_answer
        except Exception as e:
            # Broad catch is deliberate: this is the user-facing boundary;
            # the full traceback is preserved in the log via exc_info.
            logger.error("Error generating response: %s", str(e), exc_info=True)
            return self._format_error_message(str(e))
    def _extract_final_answer(self, response: dict) -> str:
        """
        Extract the final answer from the agent's response.
        Walks the message list in reverse and returns the last non-empty
        textual content. LangChain message ``content`` may be either a plain
        string or a list of content blocks; the old code called ``.strip()``
        directly and raised AttributeError on list content.
        Args:
            response: The response dictionary from the agent executor
        Returns:
            str: The extracted final answer
        """
        if isinstance(response, dict) and "messages" in response:
            # Iterate through messages in reverse to find the last message with content
            for msg in reversed(response["messages"]):
                content = getattr(msg, "content", None)
                if isinstance(content, list):
                    # Flatten content blocks: dict blocks contribute their
                    # "text" field, anything else its string form.
                    content = " ".join(
                        part.get("text", "") if isinstance(part, dict) else str(part)
                        for part in content
                    )
                if isinstance(content, str) and content.strip():
                    return content.strip()
        # Fallback
        logger.warning("Could not extract proper answer from response")
        return "I apologize, but I couldn't generate a proper response. Please try rephrasing your question."
    def _format_error_message(self, error: str) -> str:
        """
        Format error messages in a user-friendly way.
        Args:
            error: The error message
        Returns:
            str: A formatted error message
        """
        if "rate limit" in error.lower():
            return "⚠️ Rate limit reached. Please wait a moment and try again."
        elif "api key" in error.lower():
            return "⚠️ API authentication error. Please check your API keys."
        elif "timeout" in error.lower():
            return "⚠️ Request timed out. Please try again."
        else:
            return f"⚠️ An error occurred: {error}\n\nPlease try rephrasing your question or try again later."
    def get_available_tools(self) -> list:
        """
        Get a list of available tools and their descriptions.
        Returns:
            list: List of dictionaries containing tool information
        """
        return [
            {
                "name": tool.name,
                "description": tool.description
            }
            for tool in self.tools
        ]