"""AI Agent configuration for the Todo Chatbot.
Uses OpenAI Agents SDK with Gemini 2.0 Flash via OpenAI-compatible endpoint.
"""
from typing import List, Dict, Any, Optional
from uuid import UUID
from ..config import settings
from .prompts import SYSTEM_PROMPT
# Lazy initialization - only create when needed
# Module-level cache for the singleton Agent instance; stays None until
# get_todo_agent() is first called, then holds the constructed Agent.
_todo_agent = None
def get_todo_agent():
    """Return the shared Todo Agent, constructing it on first use.

    Imports the OpenAI/Agents SDK lazily so merely importing this module
    does not require those packages or a configured API key.

    Raises:
        ValueError: if ``settings.GEMINI_API_KEY`` is unset/empty.
    """
    global _todo_agent
    # Guard clause: reuse the cached instance when it already exists.
    if _todo_agent is not None:
        return _todo_agent

    # Deferred imports keep module import side-effect free.
    from openai import AsyncOpenAI
    from agents import Agent, OpenAIChatCompletionsModel
    from .tools import add_task, list_tasks, complete_task, update_task, delete_task

    if not settings.GEMINI_API_KEY:
        raise ValueError("GEMINI_API_KEY environment variable is required")

    # Gemini is reached through its OpenAI-compatible endpoint.
    gemini_client = AsyncOpenAI(
        api_key=settings.GEMINI_API_KEY,
        base_url=settings.GEMINI_API_ENDPOINT,
    )
    chat_model = OpenAIChatCompletionsModel(
        model=settings.GEMINI_MODEL,
        openai_client=gemini_client,
    )

    _todo_agent = Agent(
        name="TodoAssistant",
        instructions=SYSTEM_PROMPT,
        model=chat_model,
        tools=[add_task, list_tasks, complete_task, update_task, delete_task],
    )
    return _todo_agent
# For backwards compatibility
# NOTE(review): never re-assigned anywhere in this file, so importers of
# this name always see None — presumably callers should prefer
# get_todo_agent(); verify no caller relies on this being populated.
todo_agent = None  # Will be initialized lazily
def _build_input_messages(
    message: str,
    conversation_history: Optional[List[Dict[str, str]]],
    max_context: int,
) -> List[Dict[str, str]]:
    """Assemble the message list sent to the agent.

    Keeps at most ``max_context`` trailing messages from the history,
    then appends the current user message.

    BUGFIX: a non-positive ``max_context`` now drops the history
    entirely. The previous ``history[-max_context:]`` slice kept the
    WHOLE history when the setting was 0, because ``seq[-0:] == seq[0:]``.
    """
    messages: List[Dict[str, str]] = []
    if conversation_history and max_context > 0:
        messages.extend(conversation_history[-max_context:])
    messages.append({"role": "user", "content": message})
    return messages


async def run_agent(
    user_id: UUID,
    message: str,
    conversation_history: Optional[List[Dict[str, str]]] = None
) -> Dict[str, Any]:
    """
    Run the agent with a user message.

    Args:
        user_id: The authenticated user's UUID
        message: The user's input message
        conversation_history: Optional list of previous messages for context

    Returns:
        Dictionary with keys ``response``, ``tool_calls``, ``status``,
        and (on failure) ``error``. Errors are reported in the return
        value rather than raised, so callers never see an exception.
    """
    # Deferred imports: avoid requiring the SDK at module import time.
    from openai import AsyncOpenAI
    from agents import Agent, Runner, OpenAIChatCompletionsModel
    from .tools import add_task, list_tasks, complete_task, update_task, delete_task

    # Recent history (capped by config) plus the current user message.
    messages = _build_input_messages(
        message, conversation_history, settings.AGENT_MAX_CONTEXT_MESSAGES
    )

    # Fail fast with a structured error instead of raising, so the API
    # layer can surface a friendly message.
    if not settings.GEMINI_API_KEY:
        return {
            "response": "AI assistant is not configured. Please set GEMINI_API_KEY.",
            "tool_calls": [],
            "status": "error",
            "error": "GEMINI_API_KEY not configured"
        }

    try:
        # Create client for this request (Gemini via OpenAI-compatible endpoint).
        client = AsyncOpenAI(
            api_key=settings.GEMINI_API_KEY,
            base_url=settings.GEMINI_API_ENDPOINT
        )
        model = OpenAIChatCompletionsModel(
            model=settings.GEMINI_MODEL,
            openai_client=client
        )

        # Inject user_id into the instructions so the tools can scope
        # their queries to the authenticated user.
        user_context = f"\n\nCurrent user_id: {str(user_id)}"
        agent_with_context = Agent(
            name="TodoAssistant",
            instructions=SYSTEM_PROMPT + user_context,
            model=model,
            tools=[
                add_task,
                list_tasks,
                complete_task,
                update_task,
                delete_task,
            ]
        )

        # Run the agent using Runner; also pass user_id via run context.
        result = await Runner.run(
            agent_with_context,
            messages,
            context={"user_id": str(user_id)}
        )

        # Extract tool calls if any; the SDK result shape may vary, so
        # probe defensively with hasattr.
        tool_calls = []
        if hasattr(result, 'tool_calls') and result.tool_calls:
            for tool_call in result.tool_calls:
                tool_calls.append({
                    "tool": tool_call.name,
                    "result": tool_call.result if hasattr(tool_call, 'result') else {}
                })

        return {
            "response": result.final_output if hasattr(result, 'final_output') else str(result),
            "tool_calls": tool_calls,
            "status": "success"
        }
    except Exception as e:
        # Boundary handler: convert any SDK/network failure into a
        # structured error payload instead of propagating.
        return {
            "response": f"I encountered an error: {str(e)}. Please try again.",
            "tool_calls": [],
            "status": "error",
            "error": str(e)
        }
|