Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from huggingface_hub import InferenceClient
|
| 3 |
+
from sentence_transformers import SentenceTransformer
|
| 4 |
+
import torch
|
| 5 |
+
import re
|
| 6 |
+
|
| 7 |
+
# Initialize models and client
# Sentence-embedding model used for semantic retrieval over the health-info text.
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
# Hosted LLM endpoint that generates the chatbot replies.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")

# Load Medicaid/health info
# Raw knowledge base; chunked and embedded below for retrieval.
with open("medicaid_info.txt", "r", encoding="utf-8") as file:
    health_info = file.read()

# Load clinic info
# Lowercased once so county lookups in find_clinic_by_county are case-insensitive.
with open("info.txt", "r", encoding="utf-8") as file:
    clinic_data = file.read().lower()
| 19 |
+
def preprocess_chunks(text):
    """Split *text* into sentence-level chunks on '.' boundaries.

    Whitespace-only fragments are discarded and each kept chunk is
    stripped of surrounding whitespace.
    """
    result = []
    for fragment in text.split('.'):
        fragment = fragment.strip()
        if fragment:
            result.append(fragment)
    return result
|
| 23 |
+
|
| 24 |
+
# Pre-compute embeddings for every health-info chunk once at startup,
# so each incoming query only needs to embed the query string itself.
health_chunks = preprocess_chunks(health_info)
health_embeddings = embedding_model.encode(health_chunks, convert_to_tensor=True)
|
| 26 |
+
|
| 27 |
+
# Semantic search
def get_relevant_chunks(query, embeddings, text_chunks, k=3):
    """Return the ``k`` chunks most similar to *query* by cosine similarity.

    Args:
        query: User query string to embed.
        embeddings: Tensor of precomputed chunk embeddings, one row per chunk.
        text_chunks: The chunk strings corresponding to ``embeddings`` rows.
        k: Maximum number of chunks to return (default 3, matching the
           original hard-coded behavior).

    Returns:
        List of up to ``k`` chunk strings, best match first.
    """
    query_embedding = embedding_model.encode(query, convert_to_tensor=True)
    # Normalize both sides so the dot product below is cosine similarity.
    query_embedding = query_embedding / query_embedding.norm()
    unit_embeddings = embeddings / embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(unit_embeddings, query_embedding)
    # Clamp k: torch.topk raises if k exceeds the number of available chunks.
    top_indices = torch.topk(similarities, k=min(k, len(text_chunks))).indices
    return [text_chunks[i] for i in top_indices]
|
| 35 |
+
|
| 36 |
+
# Clinic finder
def find_clinic_by_county(county):
    """Return the clinic listing for *county* from the '###'-delimited data.

    Matching is case-insensitive (clinic_data is already lowercased at load
    time). If the section's first line names the county, that header line is
    dropped from the result. Returns a warning message when no section
    mentions the county.
    """
    needle = county.lower()
    for section_text in clinic_data.split("###"):
        if needle not in section_text:
            continue
        rows = section_text.strip().split("\n")
        # Skip the section header when it is the line naming the county.
        if rows and needle in rows[0].lower():
            rows = rows[1:]
        return "\n".join(rows).strip()
    return "⚠️ Sorry, I couldn’t find clinics for that county. Check spelling or try a nearby county."
|
| 47 |
+
|
| 48 |
+
# Chatbot logic
def respond(message, history, name, focus_area):
    """Generate a grounded (RAG) reply to the user's message.

    Args:
        message: The user's latest chat message.
        history: Prior chat turns (required by gr.ChatInterface; unused here).
        name: The user's name from the UI textbox, injected into the prompt.
        focus_area: List of topics selected in the CheckboxGroup (may be empty).

    Returns:
        The assistant's reply text, stripped of surrounding whitespace.
    """
    # Honor every selected focus area, not just the first one — the UI is a
    # CheckboxGroup, so multiple selections are possible.
    focus = ", ".join(focus_area) if focus_area else "general help"

    # Retrieve the most relevant health-info chunks to ground the answer.
    top_chunks = get_relevant_chunks(message, health_embeddings, health_chunks)
    context = "\n".join(top_chunks)

    messages = [
        {
            "role": "system",
            "content": (
                f"You are a friendly and supportive health advisor chatbot helping Washington residents, especially low-income individuals. "
                f"You're chatting with {name}. Speak clearly and kindly, and keep responses under 100 words. Focus on {focus}."
            )
        },
        {
            "role": "user",
            "content": f"Context:\n{context}\n\nQuestion: {message}"
        }
    ]

    response = client.chat_completion(messages, max_tokens=120)
    return response['choices'][0]['message']['content'].strip()
|
| 70 |
+
|
| 71 |
+
# Interface styling
title = "# 🩺 HealthHelpBot"
description = "### Helping Washington residents find care, understand Medicaid, and locate free clinics."

# Layout: left column holds the avatar and the per-user inputs that get
# forwarded to respond() as additional_inputs; right column is the chat UI.
with gr.Blocks(theme="gstaff/soft-blue") as demo:
    with gr.Row():
        gr.Markdown(title)
    with gr.Row():
        gr.Markdown(description)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Image("healthbot_avatar.png", width=250, show_label=False)
            user_name = gr.Textbox(label="👤 Your Name", placeholder="e.g. Maria")
            focus_area = gr.CheckboxGroup(["Medicaid", "Finding a clinic", "Insurance help"], label="What do you need help with?")
        with gr.Column(scale=2):
            # user_name and focus_area arrive as the 3rd/4th args of respond().
            gr.ChatInterface(fn=respond, additional_inputs=[user_name, focus_area], type="messages")

demo.launch()
|