File size: 8,011 Bytes
3b298be
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
"""
Generative AI Agent with Tool Calling Capabilities

This module provides an intelligent agent that can use multiple tools (Wikipedia, Tavily)
to answer user queries with up-to-date and accurate information.
"""

import os
import yaml
import logging
from dotenv import load_dotenv

from langchain_openai import ChatOpenAI
from langchain_core.tools import Tool
from tools.wikipedia_tool import WikipediaTool
from langchain_tavily import TavilySearch
from langgraph.prebuilt import create_react_agent

# Configure logging
# Module-wide logger named after this module. basicConfig is a no-op if the
# embedding application has already configured the root logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)



class GenerativeAIAgent:
    """
    An intelligent agent that uses LangGraph's ReAct pattern to answer queries.

    The agent can dynamically select and use tools (Wikipedia, Tavily Search) based on
    the user's query to provide accurate and up-to-date information.

    Attributes:
        config: Parsed YAML configuration (always a dict; {} for an empty file)
        llm: The language model (ChatOpenAI)
        tools: List of available tools for the agent
        agent_executor: The LangGraph ReAct agent executor
    """

    def __init__(self, config_path: str = "config.yaml"):
        """
        Initialize the GenerativeAIAgent with configuration and tools.

        Args:
            config_path: Path to the YAML configuration file

        Raises:
            FileNotFoundError: If the config file does not exist.
            yaml.YAMLError: If the config file is not valid YAML.
        """
        logger.info("Initializing GenerativeAIAgent...")

        # Load environment variables (API keys etc.) from the local env file
        load_dotenv(dotenv_path="local/.env")

        # Load configuration
        self.config = self._load_config(config_path)

        # Initialize tool backends
        self.wikipedia_tool = WikipediaTool(config_path)
        self.tavily_search = TavilySearch(max_results=5)

        # Wrap backends as LangChain Tools the agent can choose between
        self.tools = self._initialize_tools()

        # Initialize language model
        self.llm = self._initialize_llm()

        # Create ReAct agent executor (reason -> act -> observe loop)
        self.agent_executor = create_react_agent(self.llm, self.tools)

        logger.info("Agent initialized successfully with %d tools", len(self.tools))

    def _load_config(self, config_path: str) -> dict:
        """
        Load configuration from a YAML file.

        Args:
            config_path: Path to the YAML configuration file

        Returns:
            dict: Parsed configuration. An empty/blank YAML file yields {}
            — yaml.safe_load returns None for empty input, which would
            otherwise break every later ``self.config.get(...)`` call.

        Raises:
            FileNotFoundError: If the file does not exist.
            yaml.YAMLError: If the file is not valid YAML.
        """
        try:
            with open(config_path, "r") as file:
                config = yaml.safe_load(file)
            logger.info("Configuration loaded from %s", config_path)
            # Guard: safe_load returns None for an empty document.
            return config or {}
        except FileNotFoundError:
            logger.error("Config file not found: %s", config_path)
            raise
        except yaml.YAMLError as e:
            logger.error("Error parsing config file: %s", e)
            raise

    def _initialize_tools(self) -> list:
        """
        Initialize and return the list of tools available to the agent.

        The name/description strings are what the LLM sees when deciding
        which tool to call, so they are part of runtime behavior.
        """
        tools = [
            Tool(
                name="Wikipedia",
                description=(
                    "Search Wikipedia for factual, encyclopedic information. "
                    "Best for: historical facts, scientific concepts, biographies, "
                    "general knowledge. Input should be a clear search query."
                ),
                func=self.wikipedia_tool.search
            ),
            Tool(
                name="Tavily",
                description=(
                    "Search the web for current information and latest news. "
                    "Best for: recent events, breaking news, current trends, "
                    "real-time data. Input should be a search query."
                ),
                func=self.tavily_search.invoke
            )
        ]
        logger.info("Initialized tools: %s", [tool.name for tool in tools])
        return tools

    def _initialize_llm(self) -> "ChatOpenAI":
        """
        Initialize the language model from the ``openai`` config section.

        Falls back to model "gpt-5", temperature 0.7 and 1000 max tokens when
        keys are missing. The API key is read from the OPENAI_API_KEY env var.

        Returns:
            ChatOpenAI: The configured chat model.
        """
        model_config = self.config.get("openai", {})

        llm = ChatOpenAI(
            model=model_config.get("model", "gpt-5"),
            temperature=model_config.get("temperature", 0.7),
            max_tokens=model_config.get("max_tokens", 1000),
            api_key=os.getenv("OPENAI_API_KEY")
        )

        logger.info("LLM initialized: %s", model_config.get("model"))
        return llm

    def generate_response(self, user_input: str) -> str:
        """
        Generate a response to the user's input using the agent.

        The agent will automatically select and use appropriate tools based on
        the query, following the ReAct (Reasoning + Acting) pattern.

        Args:
            user_input: The user's question or query

        Returns:
            str: The agent's response, or a user-friendly error message —
            this method does not raise.
        """
        if not user_input or not user_input.strip():
            logger.warning("Empty input received")
            return "Please provide a valid question or query."

        try:
            # Log only a short preview of long queries
            preview = user_input if len(user_input) <= 50 else user_input[:50] + "..."
            logger.info("Processing query: %s", preview)

            # Get system prompt from config (with a generic fallback)
            system_prompt = self.config.get("app", {}).get(
                "system_prompt",
                "You are a helpful AI assistant with access to Wikipedia and web search tools."
            )

            # Prepare messages for the agent
            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_input}
            ]

            # Invoke the agent executor
            response = self.agent_executor.invoke({"messages": messages})

            # Extract the final answer from the response
            final_answer = self._extract_final_answer(response)

            logger.info("Response generated successfully")
            return final_answer

        except Exception as e:
            # Top-level boundary: log with traceback, return friendly text
            logger.error("Error generating response: %s", str(e), exc_info=True)
            return self._format_error_message(str(e))

    def _extract_final_answer(self, response: dict) -> str:
        """
        Extract the final answer from the agent's response.

        Scans the message list in reverse and returns the first non-empty
        *string* content found (with create_react_agent the last message is
        normally the final AI answer). Non-string content — e.g. multimodal
        block lists — is skipped rather than crashing on ``.strip()``.

        Args:
            response: The response dictionary from the agent executor

        Returns:
            str: The extracted final answer, or a fallback apology string.
        """
        if isinstance(response, dict) and "messages" in response:
            # Iterate through messages in reverse to find the last message
            # carrying usable text content.
            for msg in reversed(response["messages"]):
                content = getattr(msg, "content", None)
                if isinstance(content, str) and content.strip():
                    return content.strip()

        # Fallback
        logger.warning("Could not extract proper answer from response")
        return "I apologize, but I couldn't generate a proper response. Please try rephrasing your question."

    def _format_error_message(self, error: str) -> str:
        """
        Format error messages in a user-friendly way.

        Args:
            error: The error message

        Returns:
            str: A formatted, user-facing error message
        """
        # Case-insensitive matching on common failure phrases
        lowered = error.lower()
        if "rate limit" in lowered:
            return "⚠️ Rate limit reached. Please wait a moment and try again."
        elif "api key" in lowered:
            return "⚠️ API authentication error. Please check your API keys."
        elif "timeout" in lowered:
            return "⚠️ Request timed out. Please try again."
        else:
            return f"⚠️ An error occurred: {error}\n\nPlease try rephrasing your question or try again later."

    def get_available_tools(self) -> list:
        """
        Get a list of available tools and their descriptions.

        Returns:
            list: List of dictionaries containing tool information
        """
        return [
            {
                "name": tool.name,
                "description": tool.description
            }
            for tool in self.tools
        ]