"""AI Agent configuration for the Todo Chatbot.

Uses OpenAI Agents SDK with Gemini 2.0 Flash via OpenAI-compatible endpoint.
"""
from typing import List, Dict, Any, Optional
from uuid import UUID

from ..config import settings
from .prompts import SYSTEM_PROMPT
# Module-level cache: the shared agent is constructed at most once.
_todo_agent = None


def get_todo_agent():
    """Return the shared Todo Agent, building it on first use.

    The OpenAI/agents SDK and the tool functions are imported lazily so
    that merely importing this module does not require those packages or
    a configured API key.

    Raises:
        ValueError: if GEMINI_API_KEY is not set in settings.
    """
    global _todo_agent
    if _todo_agent is not None:
        return _todo_agent

    from openai import AsyncOpenAI
    from agents import Agent, OpenAIChatCompletionsModel
    from .tools import add_task, list_tasks, complete_task, update_task, delete_task

    if not settings.GEMINI_API_KEY:
        raise ValueError("GEMINI_API_KEY environment variable is required")

    # Gemini is reached through its OpenAI-compatible endpoint.
    gemini_client = AsyncOpenAI(
        api_key=settings.GEMINI_API_KEY,
        base_url=settings.GEMINI_API_ENDPOINT,
    )
    chat_model = OpenAIChatCompletionsModel(
        model=settings.GEMINI_MODEL,
        openai_client=gemini_client,
    )
    _todo_agent = Agent(
        name="TodoAssistant",
        instructions=SYSTEM_PROMPT,
        model=chat_model,
        tools=[add_task, list_tasks, complete_task, update_task, delete_task],
    )
    return _todo_agent
# For backwards compatibility: legacy module attribute kept so existing
# `from ... import todo_agent` statements still resolve; new code should
# call get_todo_agent() instead. NOTE(review): nothing visible in this
# file ever assigns it, so it stays None — confirm no caller relies on
# it being populated.
todo_agent = None  # Will be initialized lazily
async def run_agent(
    user_id: UUID,
    message: str,
    conversation_history: Optional[List[Dict[str, str]]] = None
) -> Dict[str, Any]:
    """
    Run the agent with a user message.

    A fresh Agent is built per request so the caller's user_id can be
    injected into the system instructions (and passed via the run
    context) for the tools to use.

    Args:
        user_id: The authenticated user's UUID
        message: The user's input message
        conversation_history: Optional list of previous messages for context

    Returns:
        Dictionary with keys:
            response: assistant text (or an error message on failure)
            tool_calls: list of {"tool", "result"} dicts for tools invoked
            status: "success" or "error"
            error: present only when status == "error"
    """
    from openai import AsyncOpenAI
    from agents import Agent, Runner, OpenAIChatCompletionsModel
    from .tools import add_task, list_tasks, complete_task, update_task, delete_task

    # Fail fast before doing any other work if the API key is missing.
    if not settings.GEMINI_API_KEY:
        return {
            "response": "AI assistant is not configured. Please set GEMINI_API_KEY.",
            "tool_calls": [],
            "status": "error",
            "error": "GEMINI_API_KEY not configured"
        }

    # Build input messages with context.
    messages: List[Dict[str, str]] = []
    # BUG FIX: the original sliced history with [-N:] unconditionally.
    # When AGENT_MAX_CONTEXT_MESSAGES == 0, lst[-0:] equals lst[0:] —
    # the WHOLE history — instead of no history. Guard the limit
    # explicitly so 0 (or a negative misconfiguration) means "no context".
    max_context = settings.AGENT_MAX_CONTEXT_MESSAGES
    if conversation_history and max_context > 0:
        messages.extend(conversation_history[-max_context:])
    # The current user message always goes last.
    messages.append({
        "role": "user",
        "content": message
    })

    try:
        # Create a client/model pair for this request.
        client = AsyncOpenAI(
            api_key=settings.GEMINI_API_KEY,
            base_url=settings.GEMINI_API_ENDPOINT
        )
        model = OpenAIChatCompletionsModel(
            model=settings.GEMINI_MODEL,
            openai_client=client
        )
        # Inject user_id into the instructions so tools can access it.
        user_context = f"\n\nCurrent user_id: {str(user_id)}"
        agent_with_context = Agent(
            name="TodoAssistant",
            instructions=SYSTEM_PROMPT + user_context,
            model=model,
            tools=[
                add_task,
                list_tasks,
                complete_task,
                update_task,
                delete_task,
            ]
        )
        # Run the agent; the context dict is also available to tools.
        result = await Runner.run(
            agent_with_context,
            messages,
            context={"user_id": str(user_id)}
        )

        # Extract tool calls if the result object exposes them.
        # NOTE(review): recent Agents SDK result types surface tool
        # activity via `new_items` rather than a `tool_calls` attribute;
        # the hasattr guards keep this best-effort — confirm against the
        # pinned SDK version.
        tool_calls = []
        if hasattr(result, 'tool_calls') and result.tool_calls:
            for tool_call in result.tool_calls:
                tool_calls.append({
                    "tool": tool_call.name,
                    "result": tool_call.result if hasattr(tool_call, 'result') else {}
                })

        return {
            "response": result.final_output if hasattr(result, 'final_output') else str(result),
            "tool_calls": tool_calls,
            "status": "success"
        }
    except Exception as e:
        # Boundary handler: return the failure as data rather than
        # propagating, so the chat endpoint can always produce a reply.
        return {
            "response": f"I encountered an error: {str(e)}. Please try again.",
            "tool_calls": [],
            "status": "error",
            "error": str(e)
        }