#!/usr/bin/env python3
"""
Task Maistro Assistant - CLEAN DEPLOYMENT v3.0.0
Railway deployment with threading timeout fixes
"""
import gradio as gr
import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
from task_maistro_production import graph as compiled_graph
from langchain_core.messages import HumanMessage
import time
# The graph is already compiled with stable in-memory backends in task_maistro_production.py
print("✅ Graph imported successfully")
print(f"🔍 Graph type: {type(compiled_graph)}")
print("🚀 Using pre-compiled graph with stable in-memory backends...")
print("Graph ready!")
def chat_with_assistant(message, history):
    """Run one user message through the compiled LangGraph graph and return the reply.

    Args:
        message: The user's chat message text.
        history: Gradio chat history (not forwarded to the graph; conversation
            state is kept by the graph's checkpointer via the thread_id).

    Returns:
        The assistant's reply text, or an ``"Error: ..."`` string on failure.
    """
    try:
        # Fail fast with a clear message if the LLM credential is missing.
        if not os.getenv("OPENAI_API_KEY"):
            return ("Error: OPENAI_API_KEY no está configurada. "
                    "Por favor, configura la variable de entorno.")
        # NOTE(fix): thread_id must live under "configurable" for LangGraph's
        # checkpointer to pick it up; it was previously a sibling top-level key
        # and therefore ignored, so no conversation state was persisted.
        # NOTE(fix): corrected the role prompt typo "You help you create".
        config = {
            "configurable": {
                "user_id": "default-user",
                "todo_category": "general",
                "task_maistro_role": (
                    "You are a helpful task management assistant. You help the "
                    "user create, organize, and manage the user's ToDo list."
                ),
                "thread_id": "default-thread",
            }
        }
        # The graph consumes LangChain message objects under the "messages" key.
        input_message = {"messages": [HumanMessage(content=message)]}
        response = compiled_graph.invoke(input_message, config=config)
        # The last message in the returned state is the assistant's reply.
        return response["messages"][-1].content
    except Exception as e:
        # Surface the failure to the UI instead of crashing the Gradio handler.
        error_msg = f"Error: {str(e)}"
        print(f"Application error: {error_msg}")
        return error_msg
def clear_chat():
    """Reset the chatbot widget by handing back an empty history list."""
    empty_history = []
    return empty_history
# Create the Gradio interface
with gr.Blocks(title="Task Maistro Assistant", theme=gr.themes.Soft()) as app:
    gr.Markdown("# 🤖 Task Maistro Assistant")
    # NOTE(fix): the description previously claimed a Redis backend, but the
    # imported graph is compiled with stable in-memory backends (see the
    # startup messages at module import time).
    gr.Markdown("""
    Tu asistente personal para gestionar tareas y recordatorios. Comparte tus tareas conmigo y te ayudaré a organizarlas.
    **🏗️ Arquitectura:**
    - 🧠 **LangGraph**: Motor de inteligencia artificial
    - 💾 **Memoria**: Estado de conversación (checkpointer) y datos persistentes (store) en memoria
    """)
    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(
                height=500,
                placeholder="Hola! Soy tu asistente de tareas. ¿En qué puedo ayudarte hoy?",
            )
            with gr.Row():
                # Simple shared-secret gate checked against the ACCESS_KEY env var.
                access_key_input = gr.Textbox(
                    label="Clave de acceso",
                    placeholder="Ingresa la clave de acceso para usar el asistente",
                    type="password",
                )
            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Escribe tu mensaje aquí...",
                    scale=4,
                    container=False
                )
                send_btn = gr.Button("Enviar", variant="primary", scale=1)
                clear_btn = gr.Button("Limpiar", variant="secondary", scale=1)
    # Event handlers
    def respond(message, history, access_key):
        """Validate the access key, run the assistant, and append to the chat.

        Returns the updated history and the new textbox value (always cleared).
        """
        if access_key != os.getenv("ACCESS_KEY"):
            # NOTE(fix): the auth error was previously returned into the input
            # textbox slot, overwriting whatever the user had typed; surface it
            # in the chat transcript instead.
            history.append([message, "Clave de acceso incorrecta. Por favor, inténtalo de nuevo."])
            return history, ""
        if message.strip() == "":
            # Ignore empty submissions.
            return history, ""
        bot_response = chat_with_assistant(message, history)
        history.append([message, bot_response])
        return history, ""
    # Bind events: Enter key and the send button share the same handler.
    msg.submit(respond, [msg, chatbot, access_key_input], [chatbot, msg])
    send_btn.click(respond, [msg, chatbot, access_key_input], [chatbot, msg])
    clear_btn.click(clear_chat, None, chatbot)
if __name__ == "__main__":
    # Railway injects PORT; fall back to 7860 to match the Dockerfile default.
    port = int(os.getenv("PORT", 7860))
    print(f"Starting application on port {port}")
    app.launch(
        server_name="0.0.0.0",  # bind all interfaces for container networking
        # NOTE(fix): was hardcoded to 7860, silently ignoring the PORT env var
        # that Railway assigns — the app never came up on the routed port.
        server_port=port,
        share=False,
        show_error=True,
        # NOTE(fix): no browser exists in a headless container deployment.
        inbrowser=False,
    )