Commit
·
a1d301f
1
Parent(s):
6380113
Fix the async generator issue and improve the chat interface.
Browse files
app.py
CHANGED
|
@@ -364,7 +364,7 @@ def convert_to_messages_format(chat_history):
|
|
| 364 |
|
| 365 |
return messages
|
| 366 |
|
| 367 |
-
async def stream_agent_response(question: str, chat_history: List) ->
|
| 368 |
"""Procesa la pregunta del usuario y devuelve la respuesta del agente."""
|
| 369 |
# Initialize response
|
| 370 |
response_text = ""
|
|
@@ -375,7 +375,8 @@ async def stream_agent_response(question: str, chat_history: List) -> tuple:
|
|
| 375 |
messages.extend(chat_history)
|
| 376 |
|
| 377 |
# Add user's question
|
| 378 |
-
|
|
|
|
| 379 |
|
| 380 |
if not agent:
|
| 381 |
error_msg = (
|
|
@@ -386,16 +387,20 @@ async def stream_agent_response(question: str, chat_history: List) -> tuple:
|
|
| 386 |
f"3. El modelo de lenguaje esté disponible\n\n"
|
| 387 |
f"Error: {agent_error}"
|
| 388 |
)
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
return
|
| 392 |
|
| 393 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 394 |
# Execute the agent
|
| 395 |
response = await agent.ainvoke({"input": question, "chat_history": chat_history})
|
| 396 |
|
| 397 |
# Process the response
|
| 398 |
-
if hasattr(response, 'output'):
|
| 399 |
response_text = response.output
|
| 400 |
|
| 401 |
# Check if the response contains an SQL query
|
|
@@ -408,19 +413,25 @@ async def stream_agent_response(question: str, chat_history: List) -> tuple:
|
|
| 408 |
response_text += f"\n\n### 🔍 Resultado de la consulta:\n```sql\n{sql_query}\n```\n\n{query_result}"
|
| 409 |
else:
|
| 410 |
response_text += "\n\n⚠️ No se pudo conectar a la base de datos para ejecutar la consulta."
|
|
|
|
|
|
|
|
|
|
|
|
|
| 411 |
else:
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
# Add assistant's response to the chat history
|
| 415 |
-
messages.append({"role": "assistant", "content": response_text})
|
| 416 |
|
| 417 |
-
#
|
| 418 |
-
|
| 419 |
|
| 420 |
except Exception as e:
|
| 421 |
error_msg = f"## ❌ Error\n\nOcurrió un error al procesar tu solicitud:\n\n```\n{str(e)}\n```"
|
| 422 |
-
|
| 423 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 424 |
|
| 425 |
# Custom CSS for the app
|
| 426 |
custom_css = """
|
|
@@ -643,24 +654,26 @@ def create_application():
|
|
| 643 |
async def bot_response(chat_history: List[Dict]) -> List[Dict]:
|
| 644 |
"""Get bot response and update chat history."""
|
| 645 |
if not chat_history or chat_history[-1]["role"] != "assistant":
|
| 646 |
-
|
|
|
|
| 647 |
|
| 648 |
try:
|
| 649 |
# Get the user's question (second to last message)
|
| 650 |
if len(chat_history) < 2:
|
| 651 |
-
|
|
|
|
| 652 |
|
| 653 |
question = chat_history[-2]["content"]
|
| 654 |
logger.info(f"Processing question: {question}")
|
| 655 |
|
| 656 |
# Call the agent and stream the response
|
| 657 |
async for response in stream_agent_response(question, chat_history[:-2]):
|
| 658 |
-
if isinstance(response,
|
| 659 |
-
|
| 660 |
-
|
| 661 |
-
|
| 662 |
-
|
| 663 |
-
|
| 664 |
|
| 665 |
logger.info("Response generation complete")
|
| 666 |
|
|
|
|
| 364 |
|
| 365 |
return messages
|
| 366 |
|
| 367 |
+
async def stream_agent_response(question: str, chat_history: List) -> List[Dict]:
|
| 368 |
"""Procesa la pregunta del usuario y devuelve la respuesta del agente."""
|
| 369 |
# Initialize response
|
| 370 |
response_text = ""
|
|
|
|
| 375 |
messages.extend(chat_history)
|
| 376 |
|
| 377 |
# Add user's question
|
| 378 |
+
user_message = {"role": "user", "content": question}
|
| 379 |
+
messages.append(user_message)
|
| 380 |
|
| 381 |
if not agent:
|
| 382 |
error_msg = (
|
|
|
|
| 387 |
f"3. El modelo de lenguaje esté disponible\n\n"
|
| 388 |
f"Error: {agent_error}"
|
| 389 |
)
|
| 390 |
+
assistant_message = {"role": "assistant", "content": error_msg}
|
| 391 |
+
messages.append(assistant_message)
|
| 392 |
+
return [assistant_message]
|
| 393 |
|
| 394 |
try:
|
| 395 |
+
# Add empty assistant message that will be updated
|
| 396 |
+
assistant_message = {"role": "assistant", "content": ""}
|
| 397 |
+
messages.append(assistant_message)
|
| 398 |
+
|
| 399 |
# Execute the agent
|
| 400 |
response = await agent.ainvoke({"input": question, "chat_history": chat_history})
|
| 401 |
|
| 402 |
# Process the response
|
| 403 |
+
if hasattr(response, 'output') and response.output:
|
| 404 |
response_text = response.output
|
| 405 |
|
| 406 |
# Check if the response contains an SQL query
|
|
|
|
| 413 |
response_text += f"\n\n### 🔍 Resultado de la consulta:\n```sql\n{sql_query}\n```\n\n{query_result}"
|
| 414 |
else:
|
| 415 |
response_text += "\n\n⚠️ No se pudo conectar a la base de datos para ejecutar la consulta."
|
| 416 |
+
|
| 417 |
+
# Update the assistant's message with the response
|
| 418 |
+
assistant_message["content"] = response_text
|
| 419 |
+
|
| 420 |
else:
|
| 421 |
+
assistant_message["content"] = "Error: No se recibió respuesta del agente."
|
|
|
|
|
|
|
|
|
|
| 422 |
|
| 423 |
+
# Return the assistant's message
|
| 424 |
+
return [assistant_message]
|
| 425 |
|
| 426 |
except Exception as e:
|
| 427 |
error_msg = f"## ❌ Error\n\nOcurrió un error al procesar tu solicitud:\n\n```\n{str(e)}\n```"
|
| 428 |
+
if "assistant_message" in locals():
|
| 429 |
+
assistant_message["content"] = error_msg
|
| 430 |
+
else:
|
| 431 |
+
assistant_message = {"role": "assistant", "content": error_msg}
|
| 432 |
+
|
| 433 |
+
logger.error(f"Error in stream_agent_response: {str(e)}", exc_info=True)
|
| 434 |
+
return [assistant_message]
|
| 435 |
|
| 436 |
# Custom CSS for the app
|
| 437 |
custom_css = """
|
|
|
|
| 654 |
async def bot_response(chat_history: List[Dict]) -> List[Dict]:
|
| 655 |
"""Get bot response and update chat history."""
|
| 656 |
if not chat_history or chat_history[-1]["role"] != "assistant":
|
| 657 |
+
yield chat_history
|
| 658 |
+
return
|
| 659 |
|
| 660 |
try:
|
| 661 |
# Get the user's question (second to last message)
|
| 662 |
if len(chat_history) < 2:
|
| 663 |
+
yield chat_history
|
| 664 |
+
return
|
| 665 |
|
| 666 |
question = chat_history[-2]["content"]
|
| 667 |
logger.info(f"Processing question: {question}")
|
| 668 |
|
| 669 |
# Call the agent and stream the response
|
| 670 |
async for response in stream_agent_response(question, chat_history[:-2]):
|
| 671 |
+
if isinstance(response, list):
|
| 672 |
+
for msg in response:
|
| 673 |
+
if msg["role"] == "assistant":
|
| 674 |
+
# Update the assistant's response
|
| 675 |
+
chat_history[-1] = msg
|
| 676 |
+
yield chat_history
|
| 677 |
|
| 678 |
logger.info("Response generation complete")
|
| 679 |
|