# app.py — General health Q&A chatbot (Gradio UI over Mistral-7B-Instruct).
# NOTE: the original upload carried Hugging Face web-page residue here
# ("nayyabzahra148's picture / Update app.py / b206615 verified"), which is
# not Python and broke the file; it has been converted to this comment.
import torch

import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
# Model checkpoint served by this Space.
model_name = "mistralai/Mistral-7B-Instruct-v0.1"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# FIX: passing load_in_4bit=True directly to from_pretrained is deprecated in
# recent transformers releases — 4-bit quantization must be requested through
# a BitsAndBytesConfig. Behavior (4-bit weights, fp16 compute) is unchanged.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",  # let accelerate place layers on available devices
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.float16,
    ),
    torch_dtype=torch.float16,
)

# Shared text-generation pipeline used by the chatbot handler.
# FIX: temperature / repetition_penalty are ignored under the default greedy
# decoding (do_sample=False); enable sampling so they actually take effect.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,
    do_sample=True,
    temperature=0.3,
    repetition_penalty=1.1,
)
def is_unsafe(query):
    """Return True when *query* asks for diagnosis, dosage, or treatment advice.

    A simple case-insensitive substring filter: if any blocked phrase occurs
    anywhere in the query, the question is refused before reaching the model.
    """
    lowered = query.lower()
    blocked_terms = (
        "dose",
        "dosage",
        "how much",
        "diagnose",
        "prescribe",
        "medicine for",
        "treatment",
        "cure",
    )
    return any(term in lowered for term in blocked_terms)
def health_chatbot(user_input):
    """Answer a general health question, refusing unsafe (diagnosis/dosage) ones.

    Queries flagged by the keyword filter get a fixed refusal message; all
    others are sent to the shared text-generation pipeline with a rule-laden
    prompt, and only the text after the final "Answer:" marker is returned.
    """
    # Guard clause: hard refusal for anything the safety filter flags.
    if is_unsafe(user_input):
        return (
            "I can’t provide diagnosis or medication instructions. "
            "Please consult a qualified healthcare professional."
        )

    prompt = f"""
You are a general health information assistant.
Rules:
- Do NOT diagnose diseases.
- Do NOT prescribe medicines or give dosages.
- Provide general causes, symptoms, and prevention only.
- Keep answers simple and clear.
- End with: 'If symptoms persist, consult a healthcare professional.'
Question:
{user_input}
Answer:
"""

    completion = generator(prompt)[0]["generated_text"]
    # The pipeline echoes the prompt back; keep only what follows "Answer:".
    answer = completion.split("Answer:")[-1]
    return answer.strip()
# Wire the chatbot handler into a minimal Gradio UI and start the server.
question_box = gr.Textbox(lines=2)

demo = gr.Interface(
    fn=health_chatbot,
    inputs=question_box,
    outputs="text",
    title="🩺 General Health Query Chatbot",
)

demo.launch()