Spaces:
Sleeping
Sleeping
| import datetime | |
| from langchain.agents import create_openai_tools_agent, AgentExecutor | |
| from langchain.chat_models import init_chat_model | |
| from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder | |
| from langchain_openai import ChatOpenAI | |
| from langchain_tavily import TavilySearch | |
| from langchain.schema import HumanMessage, SystemMessage | |
| from langchain_core.prompts import PromptTemplate | |
| from langchain_openai import ChatOpenAI | |
| from langchain_anthropic import ChatAnthropic | |
| from pydantic import BaseModel, Field | |
| from typing import List | |
| from enum import Enum | |
| from langchain_core.output_parsers import PydanticOutputParser | |
| from langchain_core.output_parsers import JsonOutputParser | |
| import os | |
| from dotenv import load_dotenv | |
| load_dotenv() | |
| from config import settings | |
class Agents:
    """Builds and runs a Tavily-web-search-enabled OpenAI tools agent."""

    def __init__(self, temperature: float = 0.3, model: str = "gpt-4.1", model_type: str = "openai",
                 is_structured: bool = True):
        """
        Initialize the Agents helper and assemble the Tavily search agent.

        Args:
            temperature: Sampling temperature. NOTE(review): currently unused —
                the Tavily agent is pinned to temperature 0 below; kept for
                interface compatibility.
            model: Model name. NOTE(review): currently unused — the Tavily
                agent is hard-wired to "gpt-4o"; kept for interface compatibility.
            model_type: Provider identifier; stored on the instance for
                downstream use.
            is_structured: Structured-output flag. NOTE(review): currently unused.
        """
        self.settings = settings
        self.model_type = model_type
        # Dedicated deterministic (temperature=0) model for tool calling.
        self.llm_tavily = init_chat_model(model="gpt-4o", model_provider="openai", temperature=0)
        self.tavily_search_tool = TavilySearch(max_results=5, topic="general")
        self.tavily_prompt = ChatPromptTemplate.from_messages([
            ("system", self.settings.COUNTY_RISK_ANALYSIS_PROMPT),
            MessagesPlaceholder(variable_name="messages"),
            # agent_scratchpad placeholder is required by openai-tools agents
            # to record intermediate tool calls/results.
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])
        self.tavily_agent = create_openai_tools_agent(
            llm=self.llm_tavily,
            tools=[self.tavily_search_tool],
            prompt=self.tavily_prompt,
        )
        self.agent_executor = AgentExecutor(agent=self.tavily_agent, tools=[self.tavily_search_tool], verbose=False)

    def generate_tavily_search(self, user_input: str) -> dict:
        """
        Run the Tavily search agent on a single natural-language query.

        Args:
            user_input: The query to research via web search.

        Returns:
            The AgentExecutor result dict (final answer under the "output"
            key). The original ``-> str`` annotation was incorrect: the raw
            invoke() result is returned, not a bare string.
        """
        response = self.agent_executor.invoke({"messages": [HumanMessage(content=user_input)]})
        return response