"""
Medical Drug QA Chatbot - Gradio Interface
Optimized for Hugging Face Spaces Deployment
"""
"""
Medical Drug QA Chatbot - Gradio Interface
"""
import gradio as gr
import os
import sys
# Prepend this file's directory to sys.path so the sibling project modules
# (Query_processing, Retrieval, Answer_Generation) import correctly no matter
# which working directory the app is launched from.
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, current_dir)
# Module-level caches for the lazily loaded pipeline callables; they stay None
# until initialize_models() populates them on first use, keeping startup fast.
_query_processor = None
_retrieval_system = None
_answer_generator = None
def initialize_models():
    """Lazily import and cache the three pipeline callables.

    Each project module is imported only the first time this runs, so app
    startup stays fast; later calls just return the cached functions.

    Returns:
        tuple: (query preprocessor, retrieval function, answer generator)
    """
    global _query_processor, _retrieval_system, _answer_generator

    if _query_processor is None:
        print("[App] Loading query processor...")
        import Query_processing
        _query_processor = Query_processing.preprocess_query

    if _retrieval_system is None:
        print("[App] Loading retrieval system...")
        import Retrieval
        _retrieval_system = Retrieval.Retrieval_averagedQP

    if _answer_generator is None:
        print("[App] Loading answer generator...")
        import Answer_Generation
        _answer_generator = Answer_Generation.answer_generation

    return _query_processor, _retrieval_system, _answer_generator
def chat_agent(message: str, history: list) -> tuple:
"""
Main chat function with error handling and loading states.
Parameters:
message (str): User's question
history (list): Chat history
Returns:
tuple: (empty string, updated history)
"""
if not message or message.strip() == "":
return "", history
try:
# Initialize models
preprocess_query, Retrieval_averagedQP, answer_generation = initialize_models()
# Step 1: Query Processing
print(f"[Chat] Processing query: {message}")
intent, entities = preprocess_query(message)
# Step 2: Retrieval
print(f"[Chat] Retrieving relevant chunks...")
chunks = Retrieval_averagedQP(message, intent, entities, top_k=10, alpha=0.8)
if chunks.empty:
error_msg = "⚠️ Sorry, I couldn't find relevant information in the database. Please try rephrasing your question."
history.append({"role": "user", "content": message})
history.append({"role": "assistant", "content": error_msg})
return "", history
# Step 3: Answer Generation
print(f"[Chat] Generating answer...")
answer = answer_generation(message, chunks, top_k=3)
# Format context for display
context = "\n\n".join([
f"**{row['drug_name']} | {row['section']} > {row['subsection']}**\n"
f"{row['chunk_text'][:200]}{'...' if len(row['chunk_text']) > 200 else ''}\n"
f"*Relevance Score: {round(row['semantic_similarity_score'], 3)}*"
for i, row in chunks.head(3).iterrows()
])
# Add to history
history.append({"role": "user", "content": message})
history.append({"role": "assistant", "content": answer})
history.append({
"role": "assistant",
"content": f"📚 View Source Chunks
\n\n{context}\n\n