Update llm_handler.py
Browse files — llm_handler.py (+6 −8)
llm_handler.py
CHANGED
|
@@ -24,15 +24,13 @@ settings.stream = True
|
|
| 24 |
|
| 25 |
def send_to_llm(provider, msg_list):
|
| 26 |
try:
|
| 27 |
-
#
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
# Call get_chat_response with the formatted messages
|
| 34 |
-
response = agent.get_chat_response(formatted_messages, llm_sampling_settings=settings)
|
| 35 |
return response.content, None # We don't have usage info in this case
|
| 36 |
except Exception as e:
|
| 37 |
print(f"Error in send_to_llm: {str(e)}")
|
| 38 |
return f"Error: {str(e)}", None
|
|
|
|
|
|
| 24 |
|
| 25 |
def send_to_llm(provider, msg_list):
    """Forward a chat history to the LLM agent and return (content, usage).

    msg_list is an iterable of {'role': ..., 'content': ...} dicts; each is
    rendered as a "role: content" line and the lines are joined with newlines
    into one transcript string for the agent. On success returns
    (response.content, None); on any failure returns ("Error: ...", None).

    NOTE(review): the `provider` argument is not used in this body — presumably
    consumed by a caller or an earlier revision; confirm before removing.
    """
    try:
        # Flatten the structured chat history into a single transcript string.
        transcript = "\n".join(
            f"{msg['role']}: {msg['content']}" for msg in msg_list
        )

        # Single agent call with the flattened transcript.
        response = agent.get_chat_response(transcript, llm_sampling_settings=settings)
        return response.content, None  # We don't have usage info in this case
    except Exception as e:
        # Best-effort boundary: report and hand back an error string
        # instead of raising, so callers always get a (text, usage) pair.
        print(f"Error in send_to_llm: {str(e)}")
        return f"Error: {str(e)}", None
|
| 36 |
+
|