|
|
""" |
|
|
Gradio UI for the A11y Expert Agent with lazy initialization. |
|
|
This module creates a Gradio ChatInterface that starts FAST, |
|
|
then initializes the agent in the background. |
|
|
""" |
|
|
import sys |
|
|
import os |
|
|
|
|
|
|
|
|
# Force unbuffered stdout/stderr so log lines appear immediately
# (useful under Docker / hosted runtimes that capture output).
os.environ['PYTHONUNBUFFERED'] = '1'
|
|
|
|
|
|
|
|
import warnings |
|
|
# Silence ResourceWarning noise (e.g. unclosed sockets from HTTP clients).
warnings.filterwarnings('ignore', category=ResourceWarning)
|
|
|
|
|
import gradio as gr |
|
|
from loguru import logger |
|
|
import atexit |
|
|
import threading |
|
|
from agent.a11y_agent import create_agent, A11yExpertAgent |
|
|
from config import get_settings |
|
|
|
|
|
|
|
|
|
|
|
# Drop loguru's default sink and re-add stderr at the configured level,
# so the level from settings actually takes effect.
logger.remove()
logger.add(sys.stderr, level=get_settings().log_level)
|
|
|
|
|
|
|
|
# Shared module-level agent state: written by the initializers, read by
# respond() on every request.
agent_instance: A11yExpertAgent = None  # the agent singleton; None until initialized
agent_ready = False  # flips to True once create_agent() has succeeded
agent_error = None  # holds the initialization error message, if any
|
|
|
|
|
|
|
|
def initialize_agent_background():
    """Build the shared agent off the main thread and publish its status.

    On success, stores the agent in the module-level ``agent_instance`` and
    flips ``agent_ready``. On any failure, records the message in
    ``agent_error`` and leaves ``agent_instance`` unset so callers can
    surface the error instead of using a half-built agent.
    """
    global agent_instance, agent_ready, agent_error

    try:
        logger.info("🔄 Starting agent initialization in background...")
        import time

        # Brief delay before heavy initialization begins.
        logger.info("⏱️ Sleeping 2 seconds to avoid race condition...")
        time.sleep(2)

        # The expensive step: constructing the agent (models, indexes, ...).
        logger.info("📦 Calling create_agent()...")
        agent_instance = create_agent()

        logger.info("✓ Agent instance created, setting ready flag...")
        agent_ready = True
        logger.success("✅ A11y Expert Agent is ready!")
    except Exception as exc:
        # Record the failure so the UI can report it on the next request.
        logger.error(f"❌ Failed to initialize agent: {exc}")
        import traceback
        logger.error(traceback.format_exc())
        agent_error = str(exc)
        agent_instance = None
|
|
|
|
|
def cleanup_resources():
    """Clean up resources on app shutdown.

    Best-effort: closes the agent and the shared embeddings client when they
    exist. Failures are logged as warnings rather than raised, since this
    runs during interpreter shutdown.
    """
    global agent_instance
    logger.info("Cleaning up resources...")
    try:
        # Close the agent (and whatever connections it holds) first.
        if agent_instance:
            agent_instance.close()

        # The embeddings client is cached on the factory function; close it
        # only if it was actually created and is non-None (the original
        # hasattr check alone would crash on a None placeholder and skip
        # the rest of cleanup).
        from models.embeddings import get_embeddings_client
        client = getattr(get_embeddings_client, '_instance', None)
        if client is not None:
            client.close()

        logger.success("✅ Resources cleaned up successfully")
    except Exception as e:
        logger.warning(f"Error during cleanup: {e}")


# atexit was imported at the top of the file but cleanup was never wired up;
# register it so resources are released on interpreter exit.
atexit.register(cleanup_resources)
|
|
|
|
|
|
|
|
def respond(message: str, history: list[dict]):
    """
    Main function for the Gradio ChatInterface.

    Lazily initializes the agent on first use, then streams the agent's
    answer. The accumulated text is yielded so the UI shows progressively
    longer output instead of isolated chunks.

    Args:
        message: The user's input message.
        history: The conversation history in Gradio "messages" format —
            a list of {"role": ..., "content": ...} dicts (the previous
            ``list[list[str]]`` annotation did not match what
            ``bot_response`` actually passes).

    Yields:
        A stream of progressively growing response strings for the UI.
    """
    global agent_instance, agent_ready, agent_error

    # Lazy first-use initialization: the app starts instantly and pays the
    # agent-construction cost only when the first query arrives.
    if not agent_ready and not agent_error and agent_instance is None:
        yield "⏳ Initializing agent for first use, please wait..."
        try:
            logger.info("🔄 Initializing agent on first request...")
            agent_instance = create_agent()
            agent_ready = True
            logger.success("✅ A11y Expert Agent is ready!")
        except Exception as e:
            logger.error(f"❌ Failed to initialize agent: {e}")
            import traceback
            logger.error(traceback.format_exc())
            agent_error = str(e)
            agent_instance = None
            yield f"❌ Agent initialization failed: {agent_error}"
            return

    # A previous initialization attempt failed; surface the stored error.
    if agent_error:
        yield f"❌ Agent initialization failed: {agent_error}"
        return

    if not agent_instance:
        yield "❌ Agent not available. Please check logs for errors."
        return

    logger.info(f"User query: '{message}'")
    full_response = ""
    try:
        # Accumulate chunks so each yield carries the full text so far.
        for chunk in agent_instance.ask(message):
            full_response += chunk
            yield full_response
    except Exception as e:
        logger.error(f"Error during response generation: {e}")
        yield f"An error occurred: {e}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI: two-column layout — chat on the left, notes panel on the right.
# ---------------------------------------------------------------------------
with gr.Blocks(title="A11y Expert") as demo:
    gr.Markdown("# 🤖 A11y Expert")
    gr.Markdown("Twój inteligentny asystent do spraw dostępności cyfrowej.")

    with gr.Row():

        # Left column: the chat interface.
        with gr.Column(scale=1):
            gr.Markdown("### 💬 Chat")
            # NOTE(review): history entries use "messages"-style dicts (see
            # user_message/bot_response below); consider
            # gr.Chatbot(type="messages") on Gradio >= 4.44 — confirm the
            # installed Gradio version before changing.
            chatbot = gr.Chatbot(height=500, show_label=False)
            msg = gr.Textbox(
                placeholder="Zadaj pytanie o WCAG, ARIA, lub poproś o analizę kodu...",
                show_label=False,
                container=False,
                max_length=300
            )

            with gr.Row():
                submit = gr.Button("Wyślij", variant="primary")
                clear = gr.Button("Wyczyść")

            # Clickable example prompts that pre-fill the input box.
            gr.Examples(
                examples=[
                    "Jakie są wymagania WCAG 2.2 dla etykiet formularzy?",
                    "Wyjaśnij rolę 'alert' w ARIA i podaj przykład.",
                    "Czy ten przycisk jest dostępny? <div onclick='...'>Click me</div>",
                    "Jaka jest różnica między aria-label a aria-labelledby?",
                ],
                inputs=msg,
                label="Przykładowe pytania"
            )
|
|
|
|
|
|
|
|
        # Right column: a Markdown notes panel backed by a local notes.md file.
        with gr.Column(scale=1):
            gr.Markdown("### 📝 Notatki")

            def load_notes():
                """Load notes from notes.md file.

                Returns the file contents, a welcome placeholder when the
                file is missing, or an error message for any other failure.
                """
                try:
                    with open("notes.md", "r", encoding="utf-8") as f:
                        return f.read()
                except FileNotFoundError:
                    # Fallback shown until the user creates notes.md.
                    return """
## Witaj w A11y Expert! 👋

Stwórz plik `notes.md` w katalogu projektu aby zobaczyć tutaj swoje notatki.

### Przydatne linki:
- [WCAG 2.2 Guidelines](https://www.w3.org/WAI/WCAG22/quickref/)
- [ARIA Authoring Practices](https://www.w3.org/WAI/ARIA/apg/)
- [MDN Accessibility](https://developer.mozilla.org/en-US/docs/Web/Accessibility)
"""
                except Exception as e:
                    return f"⚠️ Błąd wczytywania notes.md: {e}"

            # Panel content is loaded once at build time ...
            markdown_content = gr.Markdown(
                value=load_notes(),
                show_label=False,
                elem_id="notes_display"
            )

            # ... and can be re-read on demand with this button.
            refresh_btn = gr.Button("🔄 Odśwież notatki", variant="secondary")
            refresh_btn.click(
                fn=load_notes,
                outputs=markdown_content
            )
|
|
|
|
|
|
|
|
def user_message(user_input, history): |
|
|
"""Add user message to chat history.""" |
|
|
return "", history + [{"role": "user", "content": user_input}] |
|
|
|
|
|
def bot_response(history): |
|
|
"""Generate bot response.""" |
|
|
user_input = history[-1]["content"] |
|
|
|
|
|
|
|
|
if isinstance(user_input, list): |
|
|
user_input = " ".join([item.get("text", "") for item in user_input if item.get("type") == "text"]) |
|
|
|
|
|
|
|
|
history.append({"role": "assistant", "content": ""}) |
|
|
|
|
|
for response in respond(user_input, history[:-1]): |
|
|
history[-1]["content"] = response |
|
|
yield history |
|
|
|
|
|
|
|
|
    # Wire up events: send on button click or Enter — first append the user
    # message (unqueued, so the textbox clears instantly), then stream the
    # assistant's answer into the chatbot.
    submit.click(user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot_response, chatbot, chatbot
    )
    msg.submit(user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot_response, chatbot, chatbot
    )
    # Clear button resets the chat window.
    clear.click(lambda: None, None, chatbot, queue=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Agent construction is deferred to the first user request (see respond()),
# so the UI comes up immediately.
logger.info("🚀 Starting Gradio app with on-demand agent initialization...")
logger.info("ℹ️ Agent will initialize on first user query")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Run directly: enable the request queue (required for streaming
    # generator responses) and start the server.
    demo.queue()
    demo.launch()
else:
    # Imported by a host process (e.g. `gradio app.py` or Hugging Face
    # Spaces): the host launches the app itself, but queuing must still be
    # enabled here for streaming to work.
    demo.queue()
|
|
|