|
|
|
|
|
import os |
|
|
from openai import AsyncOpenAI |
|
|
import json |
|
|
|
|
|
# Shared async OpenAI client; reads the API key from the environment.
client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
|
|
|
|
|
|
|
# Tool (function-calling) schema advertised to the model on every request.
# Currently a single tool, `create_event`, for booking an apartment viewing.
tools = [
    {
        "type": "function",
        "function": {
            "name": "create_event",
            "description": "Create a calendar event to book an apartment viewing.",
            "parameters": {
                "type": "object",
                "properties": {
                    "start_time": {
                        "type": "string",
                        "description": "The start time of the event in ISO 8601 format, e.g., 2025-07-18T14:00:00",
                    },
                    "duration_minutes": {
                        "type": "integer",
                        "description": "The duration of the event in minutes.",
                        # NOTE(review): JSON-Schema "default" is advisory here —
                        # the model is not guaranteed to emit this value.
                        "default": 30
                    },
                    "summary": {
                        "type": "string",
                        "description": "A short summary or name for the event, e.g., 'Unit 5B viewing'",
                    },
                },
                "required": ["start_time", "summary"],
            },
        },
    }
]
|
|
|
|
|
async def get_llm_response(messages: list, async_chunk_handler):
    """
    Call the OpenAI chat-completions API with streaming and forward text
    chunks to ``async_chunk_handler`` as they arrive.

    This is a regular async function, NOT a generator: it consumes the whole
    stream, then returns ``(assistant_message, tool_calls)``.

    Args:
        messages: Conversation history in chat-completions message format.
        async_chunk_handler: Awaitable callable invoked with each text chunk.

    Returns:
        assistant_message: dict suitable for appending to ``messages``
            (includes a ``"tool_calls"`` key only when the model made any).
        tool_calls: list of accumulated tool-call dicts (empty if none).

    On any error, the handler receives a friendly fallback message and that
    message is returned with an empty tool-call list.
    """
    try:
        stream = await client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            stream=True,
            tools=tools,
            tool_choice="auto",
        )

        full_response = ""
        tool_calls = []

        async for chunk in stream:
            delta = chunk.choices[0].delta

            if delta and delta.content:
                text_chunk = delta.content
                full_response += text_chunk
                await async_chunk_handler(text_chunk)

            if delta and delta.tool_calls:
                for tool_call_chunk in delta.tool_calls:
                    # BUGFIX: accumulate by the API-provided `index`, not by
                    # position in `delta.tool_calls`. A tool call's fragments
                    # arrive across many chunks, later chunks may carry only a
                    # subset of calls, and new calls can first appear after the
                    # initial tool-call chunk — positional indexing corrupted
                    # or dropped arguments in those cases.
                    idx = tool_call_chunk.index
                    while len(tool_calls) <= idx:
                        tool_calls.append(
                            {
                                "id": None,
                                "type": "function",
                                "function": {"name": None, "arguments": ""},
                            }
                        )
                    if tool_call_chunk.id:
                        tool_calls[idx]["id"] = tool_call_chunk.id
                    if tool_call_chunk.function.name:
                        tool_calls[idx]["function"]["name"] = tool_call_chunk.function.name
                    if tool_call_chunk.function.arguments:
                        tool_calls[idx]["function"]["arguments"] += tool_call_chunk.function.arguments

        assistant_message = {"role": "assistant", "content": full_response}
        if tool_calls:
            assistant_message["tool_calls"] = tool_calls

        return assistant_message, tool_calls

    except Exception as e:
        # Top-level boundary: surface a friendly message instead of crashing,
        # but keep the real error visible for debugging.
        print(f"Error in get_llm_response: {e}")
        error_message = "I'm having a little trouble right now. Please try again in a moment."
        await async_chunk_handler(error_message)
        return {"role": "assistant", "content": error_message}, []