"""
Gradio UI for the A11y Expert Agent with lazy initialization.
This module creates a Gradio ChatInterface that starts FAST,
then initializes the agent in the background.
"""
import sys
import os
# Suppress asyncio cleanup warnings by setting environment variable
os.environ['PYTHONUNBUFFERED'] = '1'
# Suppress all asyncio warnings at the earliest possible point
import warnings
warnings.filterwarnings('ignore', category=ResourceWarning)
import gradio as gr
from loguru import logger
import atexit
import threading
from agent.a11y_agent import create_agent, A11yExpertAgent
from config import get_settings
# --- Setup ---
# Configure logger
logger.remove()
logger.add(sys.stderr, level=get_settings().log_level)
# Global agent instance
agent_instance: A11yExpertAgent = None
agent_ready = False
agent_error = None
# --- Agent Initialization ---
def initialize_agent_background():
    """Build the global agent instance from a worker thread.

    On success flips ``agent_ready`` to True; on any failure records the
    error text in ``agent_error`` and resets ``agent_instance`` to None.
    """
    global agent_instance, agent_ready, agent_error
    try:
        import time

        logger.info("🔄 Starting agent initialization in background...")
        # Brief pause so the UI server is fully up before heavy init starts.
        logger.info("⏱️ Sleeping 2 seconds to avoid race condition...")
        time.sleep(2)

        logger.info("📦 Calling create_agent()...")
        agent_instance = create_agent()

        logger.info("✓ Agent instance created, setting ready flag...")
        agent_ready = True
        logger.success("✅ A11y Expert Agent is ready!")
    except Exception as e:
        import traceback

        logger.error(f"❌ Failed to initialize agent: {e}")
        logger.error(traceback.format_exc())
        agent_error = str(e)
        agent_instance = None
def cleanup_resources():
    """Release the agent and shared clients at app shutdown.

    Best-effort: any failure is logged as a warning instead of propagating,
    because this may run while the interpreter is tearing down.
    """
    global agent_instance
    logger.info("Cleaning up resources...")
    try:
        # Close agent and all its resources
        if agent_instance:
            agent_instance.close()
        # Close the embeddings client singleton only if it was actually
        # created. Fix: the previous code called `_instance.close()` whenever
        # the attribute existed, which raised AttributeError (and skipped the
        # success log) when the singleton slot was present but still None.
        from models.embeddings import get_embeddings_client
        instance = getattr(get_embeddings_client, '_instance', None)
        if instance is not None:
            instance.close()
        logger.success("✅ Resources cleaned up successfully")
    except Exception as e:
        logger.warning(f"Error during cleanup: {e}")
# --- Gradio Chat Logic ---
def respond(message: str, history: list[dict]):
    """
    Main function for the Gradio ChatInterface.
    Receives a user message and streams the agent's answer, lazily
    creating the global agent on the very first request.
    Args:
        message: The user's input message.
        history: The conversation so far as role/content message dicts
            (currently unused here; the agent keeps its own context).
            Fix: was annotated `list[list[str]]`, but the caller
            (`bot_response`) passes openai-style message dicts.
    Yields:
        The progressively growing response text to update the UI.
    """
    global agent_instance, agent_ready, agent_error
    # Initialize agent on first request if not already initialized
    if not agent_ready and not agent_error and agent_instance is None:
        yield "⏳ Initializing agent for first use, please wait..."
        try:
            logger.info("🔄 Initializing agent on first request...")
            agent_instance = create_agent()
            agent_ready = True
            logger.success("✅ A11y Expert Agent is ready!")
        except Exception as e:
            logger.error(f"❌ Failed to initialize agent: {e}")
            import traceback
            logger.error(traceback.format_exc())
            agent_error = str(e)
            agent_instance = None
            yield f"❌ Agent initialization failed: {agent_error}"
            return
    # A previous failed init is sticky: report it instead of retrying forever.
    if agent_error:
        yield f"❌ Agent initialization failed: {agent_error}"
        return
    if not agent_instance:
        yield "❌ Agent not available. Please check logs for errors."
        return
    logger.info(f"User query: '{message}'")
    full_response = ""
    try:
        # Accumulate chunks so each yield carries the full text so far,
        # which is what the Gradio streaming UI expects.
        for chunk in agent_instance.ask(message):
            full_response += chunk
            yield full_response
    except Exception as e:
        logger.error(f"Error during response generation: {e}")
        yield f"An error occurred: {e}"
# --- Gradio UI Definition ---
# Two-column layout: Chat on left, Markdown content on right
with gr.Blocks(title="A11y Expert") as demo:
    gr.Markdown("# 🤖 A11y Expert")
    gr.Markdown("Twój inteligentny asystent do spraw dostępności cyfrowej.")
    with gr.Row():
        # Left column: Chatbot
        with gr.Column(scale=1):
            gr.Markdown("### 💬 Chat")
            # Fix: type="messages" matches the {"role", "content"} dicts built
            # by user_message()/bot_response() below; without it Gradio assumes
            # the legacy [user, bot] tuple format and rejects this history.
            chatbot = gr.Chatbot(height=500, show_label=False, type="messages")
            msg = gr.Textbox(
                placeholder="Zadaj pytanie o WCAG, ARIA, lub poproś o analizę kodu...",
                show_label=False,
                container=False,
                max_length=300
            )
            with gr.Row():
                submit = gr.Button("Wyślij", variant="primary")
                clear = gr.Button("Wyczyść")
            # Example questions
            gr.Examples(
                examples=[
                    "Jakie są wymagania WCAG 2.2 dla etykiet formularzy?",
                    "Wyjaśnij rolę 'alert' w ARIA i podaj przykład.",
                    "Czy ten przycisk jest dostępny? <div onclick='...'>Click me</div>",
                    "Jaka jest różnica między aria-label a aria-labelledby?",
                ],
                inputs=msg,
                label="Przykładowe pytania"
            )
        # Right column: Markdown content from file
        with gr.Column(scale=1):
            gr.Markdown("### 📝 Notatki")

            def load_notes():
                """Read notes.md from the working directory; fall back to help text."""
                try:
                    with open("notes.md", "r", encoding="utf-8") as f:
                        return f.read()
                except FileNotFoundError:
                    return """
## Witaj w A11y Expert! 👋
Stwórz plik `notes.md` w katalogu projektu aby zobaczyć tutaj swoje notatki.
### Przydatne linki:
- [WCAG 2.2 Guidelines](https://www.w3.org/WAI/WCAG22/quickref/)
- [ARIA Authoring Practices](https://www.w3.org/WAI/ARIA/apg/)
- [MDN Accessibility](https://developer.mozilla.org/en-US/docs/Web/Accessibility)
"""
                except Exception as e:
                    return f"⚠️ Błąd wczytywania notes.md: {e}"

            markdown_content = gr.Markdown(
                value=load_notes(),
                show_label=False,
                elem_id="notes_display"
            )
            refresh_btn = gr.Button("🔄 Odśwież notatki", variant="secondary")
            refresh_btn.click(
                fn=load_notes,
                outputs=markdown_content
            )

    # Chat logic
    def user_message(user_input, history):
        """Append the user's turn to the history and clear the textbox."""
        return "", history + [{"role": "user", "content": user_input}]

    def bot_response(history):
        """Stream the assistant's answer into the last history entry."""
        user_input = history[-1]["content"]
        # Extract text from multimodal format if needed
        if isinstance(user_input, list):
            user_input = " ".join([item.get("text", "") for item in user_input if item.get("type") == "text"])
        # Add assistant message placeholder
        history.append({"role": "assistant", "content": ""})
        for response in respond(user_input, history[:-1]):
            history[-1]["content"] = response
            yield history

    # Wire up the chat: Enter key and the send button trigger the same chain.
    submit.click(user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot_response, chatbot, chatbot
    )
    msg.submit(user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot_response, chatbot, chatbot
    )
    clear.click(lambda: None, None, chatbot, queue=False)
# --- App Launch ---
# Register cleanup handler
# atexit.register(cleanup_resources) # Disabled: Causes premature shutdown on Hugging Face Spaces
# Don't initialize agent on startup - it will be initialized on first user query
logger.info("🚀 Starting Gradio app with on-demand agent initialization...")
logger.info("ℹ️ Agent will initialize on first user query")
# Hugging Face Spaces picks up the module-level `demo` variable; enabling the
# request queue is required in both run modes, so do it once unconditionally
# (the previous version duplicated demo.queue() in each branch).
demo.queue()
if __name__ == "__main__":
    # Running as a script: start the local server ourselves.
    demo.launch()