"""
Medical Drug QA Chatbot - Gradio Interface
Optimized for Hugging Face Spaces Deployment
"""
"""
Medical Drug QA Chatbot - Gradio Interface
"""
import gradio as gr
import os
import sys
# This ensures the imports work correctly
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, current_dir)
# Lazy imports - only load when needed
_query_processor = None
_retrieval_system = None
_answer_generator = None
def initialize_models():
    """Import and cache the heavy pipeline components on first use.

    The imports are deferred until the first chat turn so the app starts
    quickly on Spaces; later calls simply return the cached callables.

    Returns:
        tuple: (preprocess_query, Retrieval_averagedQP, answer_generation)
    """
    global _query_processor, _retrieval_system, _answer_generator

    if _query_processor is None:
        print("[App] Loading query processor...")
        import Query_processing
        _query_processor = Query_processing.preprocess_query

    if _retrieval_system is None:
        print("[App] Loading retrieval system...")
        import Retrieval
        _retrieval_system = Retrieval.Retrieval_averagedQP

    if _answer_generator is None:
        print("[App] Loading answer generator...")
        import Answer_Generation
        _answer_generator = Answer_Generation.answer_generation

    return _query_processor, _retrieval_system, _answer_generator
def _format_sources(chunks) -> str:
    """Render the top-3 retrieved chunks as a markdown source listing.

    Parameters:
        chunks: DataFrame of retrieved chunks; expects columns 'drug_name',
            'section', 'subsection', 'chunk_text',
            'semantic_similarity_score'.

    Returns:
        str: One markdown entry per chunk, separated by blank lines.
    """
    entries = []
    for _, row in chunks.head(3).iterrows():
        text = row['chunk_text']
        # Truncate long chunks so the collapsible sources panel stays readable.
        snippet = text[:200] + ('...' if len(text) > 200 else '')
        entries.append(
            f"**{row['drug_name']} | {row['section']} > {row['subsection']}**\n"
            f"{snippet}\n"
            f"*Relevance Score: {round(row['semantic_similarity_score'], 3)}*"
        )
    return "\n\n".join(entries)


def chat_agent(message: str, history: list) -> tuple:
    """
    Main chat function: preprocess -> retrieve -> generate, with error handling.

    Parameters:
        message (str): User's question.
        history (list): Chat history in Gradio "messages" format (dicts with
            "role"/"content" keys); appended to in place and also returned.

    Returns:
        tuple: (empty string to clear the input box, updated history)
    """
    # Ignore empty / whitespace-only submissions without touching the history.
    if not message or not message.strip():
        return "", history

    try:
        # Lazily load the pipeline components (cached after the first call).
        preprocess_query, Retrieval_averagedQP, answer_generation = initialize_models()

        # Step 1: Query Processing -- intent + entity extraction.
        print(f"[Chat] Processing query: {message}")
        intent, entities = preprocess_query(message)

        # Step 2: Retrieval -- fetch candidate chunks from the drug database.
        print("[Chat] Retrieving relevant chunks...")
        chunks = Retrieval_averagedQP(message, intent, entities, top_k=10, alpha=0.8)

        if chunks.empty:
            # Nothing relevant found: say so instead of generating unsupported text.
            error_msg = "β οΈ Sorry, I couldn't find relevant information in the database. Please try rephrasing your question."
            history.append({"role": "user", "content": message})
            history.append({"role": "assistant", "content": error_msg})
            return "", history

        # Step 3: Answer Generation from the top-ranked chunks.
        print("[Chat] Generating answer...")
        answer = answer_generation(message, chunks, top_k=3)

        # Append the turn: question, answer, then a collapsible sources block.
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": answer})
        history.append({
            "role": "assistant",
            "content": f"<details><summary>π View Source Chunks</summary>\n\n{_format_sources(chunks)}\n\n</details>"
        })

        print("[Chat] β Response generated successfully")
        return "", history

    except Exception as e:
        # Boundary handler: log the full traceback server-side, and show a
        # short message to the user so the UI never crashes.
        print(f"[Chat] ERROR: {e}")
        import traceback
        traceback.print_exc()
        error_msg = f"β An error occurred: {str(e)}\n\nPlease try again or rephrase your question."
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": error_msg})
        return "", history
# Build Gradio Interface
# Declarative UI: components created inside this `with` block are laid out
# top-to-bottom and attached to `demo`.
# NOTE(review): several emoji in the UI strings below look mojibake'd
# ("π", "β οΈ", etc.) -- likely an encoding artifact; confirm against the
# original file before changing any of them.
with gr.Blocks(
    theme=gr.themes.Soft(primary_hue="cyan"),
    title="Medical Drug QA Chatbot",
    # Custom CSS for the collapsible info sections and the disclaimer banner.
    css="""
    .info-container, .info-footer {
        width: 90%;
        max-width: 1000px;
        margin: 0 auto;
    }
    details.info-section, details.about-section {
        background: white;
        border-radius: 12px;
        box-shadow: 0 2px 8px rgba(0,0,0,0.1);
        margin: 1em 0;
        padding: 0;
    }
    details > summary {
        padding: 1em 1.5em;
        font-size: 1.1em;
        font-weight: bold;
        color: #00838f;
        cursor: pointer;
        border-radius: 12px;
        transition: background-color 0.3s ease;
    }
    details > summary:hover {
        background-color: #e0f7fa;
    }
    .disclaimer {
        background: #fff3cd;
        border: 1px solid #ffc107;
        border-radius: 8px;
        padding: 1em;
        margin: 1em 0;
    }
    """
) as demo:
    # Header
    gr.Markdown("# π Medical Drug QA Chatbot")
    gr.Markdown("_Ask questions about medications and get reliable answers from trusted medical sources._")

    # Instructions (collapsed by default)
    with gr.Accordion("π€ How to Use", open=False):
        gr.Markdown("""
        Simply type your question about any medication. You can ask about:
        - **Side effects** and warnings
        - **Dosage** and usage instructions
        - **Drug interactions**
        - **Storage** and handling
        - **Precautions** for specific conditions
        ### π‘ Example Questions:
        - "What are the common side effects of Aspirin?"
        - "How should I store Insulin?"
        - "What precautions should I take with Lisinopril?"
        - "Can I drink alcohol while taking Metformin?"
        """)

    # Chatbot transcript; type="messages" expects role/content dicts, which is
    # the format chat_agent appends to history.
    chatbot = gr.Chatbot(
        type="messages",
        height=500,
        label="Chat",
        show_label=False,
        avatar_images=(None, "π€")
    )

    # Input row: question textbox plus a send button.
    with gr.Row():
        msg = gr.Textbox(
            placeholder="Ask your medical question here...",
            scale=9,
            container=False,
            show_label=False
        )
        submit = gr.Button("Send", scale=1, variant="primary")
    with gr.Row():
        clear = gr.Button("ποΈ Clear Chat", scale=1)

    # Event handlers: pressing Enter and clicking Send run the same pipeline;
    # chat_agent returns ("", history) so the textbox is cleared each turn.
    msg.submit(
        fn=chat_agent,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
    )
    submit.click(
        fn=chat_agent,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
    )
    # Clearing resets both the textbox and the chat transcript.
    clear.click(
        fn=lambda: (None, []),
        inputs=None,
        outputs=[msg, chatbot],
    )

    # About section
    with gr.Accordion("π About This Project", open=False):
        gr.Markdown("""
        This Medical Drug QA system uses advanced NLP technologies:
        - **Data Source**: Mayo Clinic's comprehensive drug database
        - **NER**: BioBERT for chemical/drug entity recognition
        - **Retrieval**: Hybrid system with MiniLM-V6 + BioBERT reranking
        - **Answer Generation**: Llama-4 via Groq API
        **Technologies**: Transformers, FAISS, Sentence-BERT, Gradio
        """)

    # Disclaimer banner (styled by the .disclaimer CSS rule above).
    gr.HTML("""
    <div class="disclaimer">
    <strong>β οΈ Medical Disclaimer</strong>: This chatbot provides educational information only.
    It should NOT be used as a substitute for professional medical advice, diagnosis, or treatment.
    Always consult a qualified healthcare provider for medical decisions.
    </div>
    """)
# Launch
if __name__ == "__main__":
demo.queue() # Enable queuing for better performance
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False, # Set to False for HF Spaces
show_error=True
) |