# JacekAI / app.py — Jacek Zadrożny (commit 3a40a92: fixed the chat exchange format)
"""
Gradio UI for the A11y Expert Agent with lazy initialization.
This module creates a Gradio ChatInterface that starts FAST,
then initializes the agent in the background.
"""
import sys
import os
# Suppress asyncio cleanup warnings by setting environment variable
os.environ['PYTHONUNBUFFERED'] = '1'
# Suppress all asyncio warnings at the earliest possible point
import warnings
warnings.filterwarnings('ignore', category=ResourceWarning)
import gradio as gr
from loguru import logger
import atexit
import threading
from agent.a11y_agent import create_agent, A11yExpertAgent
from config import get_settings
# --- Setup ---
# Configure logger
logger.remove()
logger.add(sys.stderr, level=get_settings().log_level)
# Global agent instance
agent_instance: A11yExpertAgent = None
agent_ready = False
agent_error = None
# --- Agent Initialization ---
def initialize_agent_background():
"""Initialize the agent in background thread."""
global agent_instance, agent_ready, agent_error
try:
logger.info("🔄 Starting agent initialization in background...")
import time
logger.info("⏱️ Sleeping 2 seconds to avoid race condition...")
time.sleep(2)
logger.info("📦 Calling create_agent()...")
agent_instance = create_agent()
logger.info("✓ Agent instance created, setting ready flag...")
agent_ready = True
logger.success("✅ A11y Expert Agent is ready!")
except Exception as e:
logger.error(f"❌ Failed to initialize agent: {e}")
import traceback
logger.error(traceback.format_exc())
agent_error = str(e)
agent_instance = None
def cleanup_resources():
"""Clean up resources on app shutdown."""
global agent_instance
logger.info("Cleaning up resources...")
try:
# Close agent and all its resources
if agent_instance:
agent_instance.close()
# Close embeddings client singleton if it exists
from models.embeddings import get_embeddings_client
if hasattr(get_embeddings_client, '_instance'):
get_embeddings_client._instance.close()
logger.success("✅ Resources cleaned up successfully")
except Exception as e:
logger.warning(f"Error during cleanup: {e}")
# --- Gradio Chat Logic ---
def respond(message: str, history: list[list[str]]):
"""
Main function for the Gradio ChatInterface.
Receives a user message and chat history, then uses the agent
to generate a streaming response.
Args:
message: The user's input message.
history: The conversation history provided by Gradio.
Yields:
A stream of response chunks to update the UI.
"""
global agent_instance, agent_ready, agent_error
# Initialize agent on first request if not already initialized
if not agent_ready and not agent_error and agent_instance is None:
yield "⏳ Initializing agent for first use, please wait..."
try:
logger.info("🔄 Initializing agent on first request...")
agent_instance = create_agent()
agent_ready = True
logger.success("✅ A11y Expert Agent is ready!")
except Exception as e:
logger.error(f"❌ Failed to initialize agent: {e}")
import traceback
logger.error(traceback.format_exc())
agent_error = str(e)
agent_instance = None
yield f"❌ Agent initialization failed: {agent_error}"
return
# Check if agent failed to initialize
if agent_error:
yield f"❌ Agent initialization failed: {agent_error}"
return
if not agent_instance:
yield "❌ Agent not available. Please check logs for errors."
return
logger.info(f"User query: '{message}'")
full_response = ""
try:
for chunk in agent_instance.ask(message):
full_response += chunk
yield full_response
except Exception as e:
logger.error(f"Error during response generation: {e}")
yield f"An error occurred: {e}"
# --- Gradio UI Definition ---
# Two-column layout: Chat on left, Markdown content on right
with gr.Blocks(title="A11y Expert") as demo:
gr.Markdown("# 🤖 A11y Expert")
gr.Markdown("Twój inteligentny asystent do spraw dostępności cyfrowej.")
with gr.Row():
# Left column: Chatbot
with gr.Column(scale=1):
gr.Markdown("### 💬 Chat")
chatbot = gr.Chatbot(height=500, show_label=False)
msg = gr.Textbox(
placeholder="Zadaj pytanie o WCAG, ARIA, lub poproś o analizę kodu...",
show_label=False,
container=False,
max_length=300
)
with gr.Row():
submit = gr.Button("Wyślij", variant="primary")
clear = gr.Button("Wyczyść")
# Example questions
gr.Examples(
examples=[
"Jakie są wymagania WCAG 2.2 dla etykiet formularzy?",
"Wyjaśnij rolę 'alert' w ARIA i podaj przykład.",
"Czy ten przycisk jest dostępny? <div onclick='...'>Click me</div>",
"Jaka jest różnica między aria-label a aria-labelledby?",
],
inputs=msg,
label="Przykładowe pytania"
)
# Right column: Markdown content from file
with gr.Column(scale=1):
gr.Markdown("### 📝 Notatki")
def load_notes():
"""Load notes from notes.md file."""
try:
with open("notes.md", "r", encoding="utf-8") as f:
return f.read()
except FileNotFoundError:
return """
## Witaj w A11y Expert! 👋
Stwórz plik `notes.md` w katalogu projektu aby zobaczyć tutaj swoje notatki.
### Przydatne linki:
- [WCAG 2.2 Guidelines](https://www.w3.org/WAI/WCAG22/quickref/)
- [ARIA Authoring Practices](https://www.w3.org/WAI/ARIA/apg/)
- [MDN Accessibility](https://developer.mozilla.org/en-US/docs/Web/Accessibility)
"""
except Exception as e:
return f"⚠️ Błąd wczytywania notes.md: {e}"
markdown_content = gr.Markdown(
value=load_notes(),
show_label=False,
elem_id="notes_display"
)
refresh_btn = gr.Button("🔄 Odśwież notatki", variant="secondary")
refresh_btn.click(
fn=load_notes,
outputs=markdown_content
)
# Chat logic
def user_message(user_input, history):
"""Add user message to chat history."""
return "", history + [{"role": "user", "content": user_input}]
def bot_response(history):
"""Generate bot response."""
user_input = history[-1]["content"]
# Extract text from multimodal format if needed
if isinstance(user_input, list):
user_input = " ".join([item.get("text", "") for item in user_input if item.get("type") == "text"])
# Add assistant message placeholder
history.append({"role": "assistant", "content": ""})
for response in respond(user_input, history[:-1]):
history[-1]["content"] = response
yield history
# Wire up the chat
submit.click(user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
bot_response, chatbot, chatbot
)
msg.submit(user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
bot_response, chatbot, chatbot
)
clear.click(lambda: None, None, chatbot, queue=False)
# --- App Launch ---
# Register cleanup handler
# atexit.register(cleanup_resources) # Disabled: Causes premature shutdown on Hugging Face Spaces
# Don't initialize agent on startup - it will be initialized on first user query
logger.info("🚀 Starting Gradio app with on-demand agent initialization...")
logger.info("ℹ️ Agent will initialize on first user query")
# For Hugging Face Spaces, we need to either:
# 1. Have a variable named 'demo' (which we have)
# 2. Or explicitly call demo.queue() to enable the app
# We'll use queue() to ensure proper startup
if __name__ == "__main__":
demo.queue()
demo.launch()
else:
# On HF Spaces, just ensure demo is ready
demo.queue()