File size: 5,476 Bytes
59c860e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 |
"""
Gradio UI for the A11y Expert Agent.
This module creates a Gradio ChatInterface to interact with the
A11yExpertAgent, allowing users to ask accessibility-related questions.
"""
import asyncio
import gradio as gr
from loguru import logger
import sys
import atexit
import threading
from agent.a11y_agent import create_agent, A11yExpertAgent
from config import get_settings
# --- Setup ---
# Configure logger: drop loguru's default sink, then log to stderr at the
# level taken from application settings.
logger.remove()
logger.add(sys.stderr, level=get_settings().log_level)

# Global agent state, populated by initialize_agent_background().
# Annotations are string literals so they are not evaluated at import time;
# all three start as None/False until initialization completes.
agent_instance: "A11yExpertAgent | None" = None
agent_ready: bool = False
agent_error: "str | None" = None
# Dedicated event loop for the agent's async operations (created during
# initialization, closed in cleanup_resources()).
loop: "asyncio.AbstractEventLoop | None" = None
# --- Agent Initialization ---
def initialize_agent_background():
    """Build the global agent instance and its dedicated asyncio loop.

    Note: despite the name, this runs synchronously in whichever thread
    calls it — it creates a fresh event loop, installs it as that thread's
    current loop, and drives ``create_agent()`` to completion on it.

    Side effects on module globals:
        * success — ``agent_instance`` is set and ``agent_ready`` is True;
        * failure — the exception text is stored in ``agent_error`` and
          ``agent_instance`` is reset to None.
    """
    global agent_instance, agent_ready, agent_error, loop
    try:
        logger.info("🔄 Starting agent initialization in background...")
        # One private loop, kept in the global so cleanup can close it later.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        agent_instance = loop.run_until_complete(create_agent())
    except Exception as exc:
        logger.error(f"Failed to initialize agent: {exc}")
        agent_error = str(exc)
        agent_instance = None
    else:
        agent_ready = True
        logger.success("✅ A11y Expert Agent is ready!")
def cleanup_resources():
    """Tear down everything the app owns before the process exits.

    Closes the agent, the shared embeddings-client singleton (only if one
    was ever created), and the private event loop — cancelling any tasks
    still pending on it first. Failures are logged as warnings; cleanup
    itself never raises.
    """
    global agent_instance, loop
    logger.info("Cleaning up resources...")
    try:
        # Agent first: it may hold handles of its own.
        if agent_instance:
            agent_instance.close()

        # The embeddings client is a lazily created singleton stashed as an
        # attribute on its factory; only touch it if it actually exists.
        from models.embeddings import get_embeddings_client
        _missing = object()
        singleton = getattr(get_embeddings_client, '_instance', _missing)
        if singleton is not _missing:
            singleton.close()

        # Finally the event loop: cancel whatever is still pending, drain
        # the cancellations, then close it for good.
        if loop and not loop.is_closed():
            try:
                unfinished = asyncio.all_tasks(loop)
                for task in unfinished:
                    task.cancel()
                loop.run_until_complete(
                    asyncio.gather(*unfinished, return_exceptions=True)
                )
            except RuntimeError:
                pass  # Loop may already be stopped
            loop.close()
        logger.success("✅ Resources cleaned up successfully")
    except Exception as exc:
        logger.warning(f"Error during cleanup: {exc}")
# --- Gradio Chat Logic ---
async def respond(message: str, history: list[list[str]]):
    """Gradio ChatInterface handler: stream the agent's answer to a query.

    If the agent is still initializing, shows a waiting message and polls
    for up to 60 seconds before giving up. Once the agent is available,
    forwards the user's message and yields the growing response so the UI
    updates incrementally.

    Args:
        message: The user's input message.
        history: The conversation history provided by Gradio (unused here).

    Yields:
        Progressively longer response strings for the UI to display.
    """
    global agent_instance, agent_ready, agent_error

    if not agent_ready:
        # Fast path: initialization already failed — report and stop.
        if agent_error:
            yield f"❌ Agent initialization failed: {agent_error}"
            return
        yield "⏳ Agent is initializing, please wait..."
        # Poll once per second, up to 60 seconds, for the agent to come up.
        remaining = 60
        while remaining and not agent_ready:
            await asyncio.sleep(1)
            remaining -= 1
            if agent_error and not agent_ready:
                yield f"❌ Agent initialization failed: {agent_error}"
                return
        if not agent_ready:
            yield "❌ Agent initialization timeout. Please try again later."
            return

    if not agent_instance:
        yield "❌ Agent not available. Please check logs for errors."
        return

    logger.info(f"User query: '{message}'")
    accumulated = ""
    try:
        # Stream chunks from the agent, yielding the running total each time
        # so Gradio re-renders the whole message as it grows.
        async for piece in agent_instance.ask(message):
            accumulated += piece
            yield accumulated
    except Exception as err:
        logger.error(f"Error during response generation: {err}")
        yield f"An error occurred: {err}"
# --- Gradio UI Definition ---
# Using gr.Blocks for more layout control
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 A11y Expert")
    gr.Markdown(
        "Twój inteligentny asystent do spraw dostępności cyfrowej. "
        "Zadaj pytanie o WCAG, ARIA, lub poproś o analizę kodu."
    )
    # The main chat interface, wired to the async streaming handler.
    chat = gr.ChatInterface(respond)
    # Example questions (in Polish, matching the UI language).
    gr.Examples(
        [
            "Jakie są wymagania WCAG 2.2 dla etykiet formularzy?",
            "Wyjaśnij rolę 'alert' w ARIA i podaj przykład.",
            "Czy ten przycisk jest dostępny? <div onclick='...'>Click me</div>",
            # Fixed typo: the ARIA attributes are aria-label / aria-labelledby,
            # not "ria-label" / "ria-labelledby".
            "Jaka jest różnica między aria-label a aria-labelledby?",
        ],
        inputs=[chat.textbox],
        label="Przykładowe pytania"
    )
# --- App Launch ---
if __name__ == "__main__":
    # Ensure resources are released even on abnormal interpreter exit.
    atexit.register(cleanup_resources)
    # BUG FIX: the original called initialize_agent_sync(), which is not
    # defined anywhere in this module and would raise NameError on startup.
    # The initializer actually defined above is initialize_agent_background();
    # it runs synchronously here, so the agent is ready before launch.
    initialize_agent_background()
    settings = get_settings()
    logger.info("Launching Gradio app...")
    try:
        demo.launch(
            server_name=settings.server_host,
            server_port=settings.server_port,
            show_error=True,
        )
    except KeyboardInterrupt:
        logger.info("Received interrupt signal")
    finally:
        # launch() blocks; clean up once it returns or is interrupted.
        cleanup_resources()
|