mgbam committed on
Commit
056dfed
·
verified ·
1 Parent(s): 4587acc

Update llm_handler.py

Browse files
Files changed (1) hide show
  1. llm_handler.py +22 -20
llm_handler.py CHANGED
@@ -1,6 +1,7 @@
1
  # rentbot/llm_handler.py
2
  import os
3
  from openai import AsyncOpenAI
 
4
 
5
  client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
6
 
@@ -34,10 +35,11 @@ tools = [
34
  }
35
  ]
36
 
37
- async def get_llm_response(messages: list):
38
  """
39
- Yields response chunks from OpenAI's chat completion streaming API.
40
- Returns the full assistant message and any tool calls.
 
41
  """
42
  try:
43
  stream = await client.chat.completions.create(
@@ -53,33 +55,33 @@ async def get_llm_response(messages: list):
53
 
54
  async for chunk in stream:
55
  delta = chunk.choices[0].delta
56
- if delta.content:
57
  text_chunk = delta.content
58
  full_response += text_chunk
59
- yield text_chunk # Yield text chunks for real-time TTS
 
60
 
61
- if delta.tool_calls:
62
- # Accumulate tool call chunks
63
  if not tool_calls:
64
- tool_calls.extend(delta.tool_calls)
65
- else:
66
- for i, tool_call_chunk in enumerate(delta.tool_calls):
67
- if tool_call_chunk.function.arguments:
68
- tool_calls[i].function.arguments += tool_call_chunk.function.arguments
 
 
69
 
70
  # Construct the final assistant message object
71
  assistant_message = {"role": "assistant", "content": full_response}
72
  if tool_calls:
73
- assistant_message["tool_calls"] = [
74
- {
75
- "id": tc.id,
76
- "type": "function",
77
- "function": {"name": tc.function.name, "arguments": tc.function.arguments}
78
- } for tc in tool_calls
79
- ]
80
 
 
81
  return assistant_message, tool_calls
82
 
83
  except Exception as e:
84
  print(f"Error in get_llm_response: {e}")
85
- return {"role": "assistant", "content": "I'm having a little trouble right now. Please try again in a moment."}, []
 
 
 
1
  # rentbot/llm_handler.py
2
  import os
3
  from openai import AsyncOpenAI
4
+ import json
5
 
6
  client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
7
 
 
35
  }
36
  ]
37
 
38
+ async def get_llm_response(messages: list, async_chunk_handler):
39
  """
40
+ Calls the OpenAI API and streams text chunks to a handler.
41
+ This is now a regular async function, NOT a generator.
42
+ It returns the final assistant message and any tool calls.
43
  """
44
  try:
45
  stream = await client.chat.completions.create(
 
55
 
56
  async for chunk in stream:
57
  delta = chunk.choices[0].delta
58
+ if delta and delta.content:
59
  text_chunk = delta.content
60
  full_response += text_chunk
61
+ # Call the provided handler with the new chunk
62
+ await async_chunk_handler(text_chunk)
63
 
64
+ if delta and delta.tool_calls:
65
+ # This part handles accumulating tool call data from multiple chunks
66
  if not tool_calls:
67
+ tool_calls = [{"id": tc.id, "type": "function", "function": {"name": None, "arguments": ""}} for tc in delta.tool_calls]
68
+
69
+ for i, tool_call_chunk in enumerate(delta.tool_calls):
70
+ if tool_call_chunk.function.name:
71
+ tool_calls[i]["function"]["name"] = tool_call_chunk.function.name
72
+ if tool_call_chunk.function.arguments:
73
+ tool_calls[i]["function"]["arguments"] += tool_call_chunk.function.arguments
74
 
75
  # Construct the final assistant message object
76
  assistant_message = {"role": "assistant", "content": full_response}
77
  if tool_calls:
78
+ assistant_message["tool_calls"] = tool_calls
 
 
 
 
 
 
79
 
80
+ # This return is now VALID because there is no 'yield' in this function
81
  return assistant_message, tool_calls
82
 
83
  except Exception as e:
84
  print(f"Error in get_llm_response: {e}")
85
+ error_message = "I'm having a little trouble right now. Please try again in a moment."
86
+ await async_chunk_handler(error_message)
87
+ return {"role": "assistant", "content": error_message}, []