# Scripts/app.py — last updated by NiranjanSathish (commit f3b32da, verified)
"""
Medical Drug QA Chatbot - Gradio Interface
Optimized for Hugging Face Spaces Deployment
"""
"""
Medical Drug QA Chatbot - Gradio Interface
"""
import gradio as gr
import os
import sys
# This ensures the imports work correctly: add this file's directory to the
# module search path so the sibling pipeline modules (Query_processing,
# Retrieval, Answer_Generation) resolve regardless of the launch directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, current_dir)
# Lazy imports - only load when needed; populated by initialize_models()
_query_processor = None   # Query_processing.preprocess_query once loaded
_retrieval_system = None  # Retrieval.Retrieval_averagedQP once loaded
_answer_generator = None  # Answer_Generation.answer_generation once loaded
def initialize_models():
    """Load the pipeline components on first use and cache them in module globals.

    Importing these modules pulls in heavy model weights, so it is deferred
    until the first chat request instead of happening at app startup.

    Returns:
        tuple: (preprocess_query, Retrieval_averagedQP, answer_generation)
    """
    global _query_processor, _retrieval_system, _answer_generator

    if _query_processor is None:
        print("[App] Loading query processor...")
        import Query_processing
        _query_processor = Query_processing.preprocess_query

    if _retrieval_system is None:
        print("[App] Loading retrieval system...")
        import Retrieval
        _retrieval_system = Retrieval.Retrieval_averagedQP

    if _answer_generator is None:
        print("[App] Loading answer generator...")
        import Answer_Generation
        _answer_generator = Answer_Generation.answer_generation

    return (_query_processor, _retrieval_system, _answer_generator)
def _format_source_chunks(chunks) -> str:
    """Render the top retrieved chunks as a markdown source listing.

    Parameters:
        chunks: pandas DataFrame with drug_name, section, subsection,
            chunk_text, and semantic_similarity_score columns.

    Returns:
        str: one markdown entry per chunk (top 3), text truncated to 200 chars.
    """
    return "\n\n".join(
        f"**{row['drug_name']} | {row['section']} > {row['subsection']}**\n"
        f"{row['chunk_text'][:200]}{'...' if len(row['chunk_text']) > 200 else ''}\n"
        f"*Relevance Score: {round(row['semantic_similarity_score'], 3)}*"
        for _, row in chunks.head(3).iterrows()
    )


def chat_agent(message: str, history: list) -> tuple:
    """
    Main chat function with error handling and loading states.

    Parameters:
        message (str): User's question
        history (list): Chat history in Gradio "messages" format
            (dicts with "role" and "content" keys); mutated in place

    Returns:
        tuple: (empty string to clear the textbox, updated history)
    """
    if not message or message.strip() == "":
        return "", history

    # Record the user turn exactly once, up front. Previously the user
    # message was appended both in the success path and in the except
    # handler, so an exception raised after the first append produced a
    # duplicated user bubble.
    history.append({"role": "user", "content": message})

    try:
        # Initialize models (lazy; cached after the first call)
        preprocess_query, Retrieval_averagedQP, answer_generation = initialize_models()

        # Step 1: Query Processing — extract intent and drug entities
        print(f"[Chat] Processing query: {message}")
        intent, entities = preprocess_query(message)

        # Step 2: Retrieval — hybrid search over the chunk database
        print("[Chat] Retrieving relevant chunks...")
        chunks = Retrieval_averagedQP(message, intent, entities, top_k=10, alpha=0.8)
        if chunks.empty:
            history.append({
                "role": "assistant",
                "content": "⚠️ Sorry, I couldn't find relevant information in the database. Please try rephrasing your question.",
            })
            return "", history

        # Step 3: Answer Generation from the retrieved context
        print("[Chat] Generating answer...")
        answer = answer_generation(message, chunks, top_k=3)

        # Answer first, then a collapsible panel showing the source chunks.
        history.append({"role": "assistant", "content": answer})
        history.append({
            "role": "assistant",
            "content": f"<details><summary>πŸ“š View Source Chunks</summary>\n\n{_format_source_chunks(chunks)}\n\n</details>",
        })
        print("[Chat] βœ“ Response generated successfully")
        return "", history

    except Exception as e:
        # Log the full traceback server-side, but show the user a short notice.
        print(f"[Chat] ERROR: {e}")
        import traceback
        traceback.print_exc()
        history.append({
            "role": "assistant",
            "content": f"❌ An error occurred: {str(e)}\n\nPlease try again or rephrase your question.",
        })
        return "", history
# Build Gradio Interface
# Top-level layout: header → usage instructions → chat transcript → input row
# → clear button → about section → medical disclaimer. Event handlers are
# wired after all components exist.
with gr.Blocks(
    theme=gr.themes.Soft(primary_hue="cyan"),
    title="Medical Drug QA Chatbot",
    # Custom CSS: card-style collapsible <details> sections and a highlighted
    # disclaimer box (used by the gr.HTML footer below).
    css="""
    .info-container, .info-footer {
        width: 90%;
        max-width: 1000px;
        margin: 0 auto;
    }
    details.info-section, details.about-section {
        background: white;
        border-radius: 12px;
        box-shadow: 0 2px 8px rgba(0,0,0,0.1);
        margin: 1em 0;
        padding: 0;
    }
    details > summary {
        padding: 1em 1.5em;
        font-size: 1.1em;
        font-weight: bold;
        color: #00838f;
        cursor: pointer;
        border-radius: 12px;
        transition: background-color 0.3s ease;
    }
    details > summary:hover {
        background-color: #e0f7fa;
    }
    .disclaimer {
        background: #fff3cd;
        border: 1px solid #ffc107;
        border-radius: 8px;
        padding: 1em;
        margin: 1em 0;
    }
    """
) as demo:
    # Header
    gr.Markdown("# πŸ’Š Medical Drug QA Chatbot")
    gr.Markdown("_Ask questions about medications and get reliable answers from trusted medical sources._")
    # Instructions (collapsed by default)
    with gr.Accordion("πŸ€” How to Use", open=False):
        gr.Markdown("""
        Simply type your question about any medication. You can ask about:
        - **Side effects** and warnings
        - **Dosage** and usage instructions
        - **Drug interactions**
        - **Storage** and handling
        - **Precautions** for specific conditions
        ### πŸ’‘ Example Questions:
        - "What are the common side effects of Aspirin?"
        - "How should I store Insulin?"
        - "What precautions should I take with Lisinopril?"
        - "Can I drink alcohol while taking Metformin?"
        """)
    # Chatbot transcript; "messages" mode expects role/content dicts, which is
    # exactly what chat_agent appends to history.
    chatbot = gr.Chatbot(
        type="messages",
        height=500,
        label="Chat",
        show_label=False,
        avatar_images=(None, "πŸ€–")
    )
    # Input row: question textbox plus Send button
    with gr.Row():
        msg = gr.Textbox(
            placeholder="Ask your medical question here...",
            scale=9,
            container=False,
            show_label=False
        )
        submit = gr.Button("Send", scale=1, variant="primary")
    with gr.Row():
        clear = gr.Button("πŸ—‘οΈ Clear Chat", scale=1)
    # Event handlers: Enter in the textbox and the Send button both route
    # through chat_agent; outputs clear the textbox and update the transcript.
    msg.submit(
        fn=chat_agent,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
    )
    submit.click(
        fn=chat_agent,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
    )
    # Clear resets both the textbox and the chat history.
    clear.click(
        fn=lambda: (None, []),
        inputs=None,
        outputs=[msg, chatbot],
    )
    # About section (collapsed by default)
    with gr.Accordion("πŸ“š About This Project", open=False):
        gr.Markdown("""
        This Medical Drug QA system uses advanced NLP technologies:
        - **Data Source**: Mayo Clinic's comprehensive drug database
        - **NER**: BioBERT for chemical/drug entity recognition
        - **Retrieval**: Hybrid system with MiniLM-V6 + BioBERT reranking
        - **Answer Generation**: Llama-4 via Groq API
        **Technologies**: Transformers, FAISS, Sentence-BERT, Gradio
        """)
    # Disclaimer footer — raw HTML so the .disclaimer CSS class applies
    gr.HTML("""
    <div class="disclaimer">
        <strong>⚠️ Medical Disclaimer</strong>: This chatbot provides educational information only.
        It should NOT be used as a substitute for professional medical advice, diagnosis, or treatment.
        Always consult a qualified healthcare provider for medical decisions.
    </div>
    """)
# Launch the app only when run as a script (not when imported)
if __name__ == "__main__":
    demo.queue()  # Enable queuing for better performance under concurrent users
    demo.launch(
        server_name="0.0.0.0",  # bind all interfaces so the Spaces proxy can reach it
        server_port=7860,  # the port Hugging Face Spaces expects
        share=False,  # Set to False for HF Spaces (no public tunnel needed)
        show_error=True  # surface Python errors in the browser UI
    )