Spaces:
Sleeping
Sleeping
Upload 3 files
Browse files- README.md +60 -5
- app.py +128 -0
- requirements.txt +8 -0
README.md
CHANGED
|
@@ -1,13 +1,68 @@
|
|
| 1 |
---
|
| 2 |
-
title: Medical Assistant
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
colorTo: green
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version:
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
| 11 |
---
|
| 12 |
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: TinyLlama Medical Assistant
|
| 3 |
+
emoji: 🩺
|
| 4 |
+
colorFrom: blue
|
| 5 |
colorTo: green
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 4.0.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
| 11 |
---
|
| 12 |
|
| 13 |
+
# 🩺 TinyLlama Medical Assistant
|
| 14 |
+
|
| 15 |
+
A fine-tuned TinyLlama-1.1B model for answering questions about allopathic medicines.
|
| 16 |
+
|
| 17 |
+
## 🎯 Features
|
| 18 |
+
|
| 19 |
+
- Fine-tuned on 500 medicine examples using LoRA
|
| 20 |
+
- Provides information about common medications
|
| 21 |
+
- 4-bit quantization for efficient inference
|
| 22 |
+
- Interactive chat interface
|
| 23 |
+
|
| 24 |
+
## 💊 Supported Medications
|
| 25 |
+
|
| 26 |
+
The model has been trained on information about:
|
| 27 |
+
- **Pain Relief:** Paracetamol, Ibuprofen
|
| 28 |
+
- **Antibiotics:** Amoxicillin, Azithromycin
|
| 29 |
+
- **Diabetes:** Metformin
|
| 30 |
+
- **Cholesterol:** Atorvastatin
|
| 31 |
+
- **Blood Pressure:** Amlodipine, Losartan
|
| 32 |
+
- **Digestive:** Omeprazole
|
| 33 |
+
- **Allergies:** Cetirizine
|
| 34 |
+
|
| 35 |
+
## 🚀 Usage
|
| 36 |
+
|
| 37 |
+
Simply type your question about a medication in the chat box. For example:
|
| 38 |
+
- "What is Paracetamol used for?"
|
| 39 |
+
- "Tell me about Metformin"
|
| 40 |
+
- "What are the uses of Ibuprofen?"
|
| 41 |
+
|
| 42 |
+
## ⚠️ Disclaimer
|
| 43 |
+
|
| 44 |
+
**IMPORTANT:** This AI assistant is for educational purposes only. It should NOT replace professional medical advice, diagnosis, or treatment. Always seek the advice of qualified health providers with any questions about medical conditions or medications.
|
| 45 |
+
|
| 46 |
+
## 🛠️ Technical Details
|
| 47 |
+
|
| 48 |
+
- **Base Model:** TinyLlama/TinyLlama-1.1B-Chat-v1.0
|
| 49 |
+
- **Fine-tuning Method:** LoRA (Low-Rank Adaptation)
|
| 50 |
+
- **Training Data:** 500 instruction-response pairs
|
| 51 |
+
- **Quantization:** 4-bit NF4
|
| 52 |
+
- **Framework:** Transformers, PEFT, Gradio
|
| 53 |
+
|
| 54 |
+
## 📊 Model Performance
|
| 55 |
+
|
| 56 |
+
The model has been trained for 5 epochs with:
|
| 57 |
+
- Training loss: ~0.054
|
| 58 |
+
- Mean token accuracy: ~97.9%
|
| 59 |
+
|
| 60 |
+
## 📝 License
|
| 61 |
+
|
| 62 |
+
MIT License - Feel free to use and modify for educational purposes.
|
| 63 |
+
|
| 64 |
+
## 🙏 Acknowledgments
|
| 65 |
+
|
| 66 |
+
- TinyLlama team for the base model
|
| 67 |
+
- Hugging Face for the infrastructure
|
| 68 |
+
- Gradio for the UI framework
|
app.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

print("🔄 Loading model... Please wait...")

# Appended verbatim to every generated answer so users are always reminded
# that the model is not a source of professional medical advice.
MEDICAL_DISCLAIMER = """

⚠️ **Medical Disclaimer:** This response is for educational purposes only
and is not a substitute for professional medical advice.
Always consult a qualified healthcare provider.
"""

# Base checkpoint on the Hub plus the local LoRA adapter directory
# produced by fine-tuning (shipped alongside this app).
base_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
lora_path = "./model"

# 4-bit NF4 quantization keeps the 1.1B model small enough for modest
# Space hardware; double quantization further reduces memory footprint.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True
)

tokenizer = AutoTokenizer.from_pretrained(base_model)
# The tokenizer ships without a dedicated pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb_config,
    device_map="auto",
    # NOTE(review): trust_remote_code allows executing code bundled with the
    # checkpoint; TinyLlama normally loads without it — confirm it is needed.
    trust_remote_code=True
)

# Attach the fine-tuned LoRA weights on top of the quantized base model.
model = PeftModel.from_pretrained(model, lora_path)
model.eval()  # inference only: disables dropout and training-mode behavior

print("✅ Model loaded successfully!")
| 40 |
+
def generate_response(message, history, temperature, max_tokens, top_p):
    """Generate a single answer to *message* with the fine-tuned model.

    Parameters
    ----------
    message : str
        The user's question, inserted into the Alpaca-style prompt template.
    history : list
        Accepted for chat-UI signature compatibility; not used here.
    temperature, max_tokens, top_p :
        Sampling controls forwarded to ``model.generate``.

    Returns
    -------
    str
        The model's answer with the medical disclaimer appended.
    """
    prompt = f"""### Instruction:
{message}

### Response:
"""

    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)

    eos = tokenizer.eos_token_id
    with torch.no_grad():
        generated = model.generate(
            **encoded,
            do_sample=True,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            eos_token_id=eos,
            pad_token_id=eos,
        )

    full_text = tokenizer.decode(generated[0], skip_special_tokens=True)
    # The decoded text echoes the prompt; keep only what follows the marker.
    answer = full_text.split("### Response:")[-1].strip()
    return answer + MEDICAL_DISCLAIMER
|
| 64 |
+
|
| 65 |
+
# Gradio UI: chat column on the left, generation settings on the right,
# wired to generate_response() through the respond() callback below.
with gr.Blocks(theme=gr.themes.Soft(), title="Medical Assistant") as demo:
    # Page header.
    gr.Markdown("""
    # 🩺 TinyLlama Medical Assistant
    ### Fine-tuned on Allopathic Medicine Dataset

    Ask questions about common medications and their uses.
    """)

    # Prominent safety banner rendered as an inline-styled HTML callout.
    gr.Markdown("""
    <div style='background-color: #fff3cd; padding: 15px; border-radius: 8px; border-left: 4px solid #ffc107; margin: 10px 0;'>
    ⚠️ <strong>Important:</strong> For educational purposes only. Not a substitute for professional medical advice.
    </div>
    """)

    with gr.Row():
        # Left (wider) column: the conversation itself.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                height=500,
                label="Medical Consultation",
                bubble_full_width=False
            )

            msg = gr.Textbox(
                label="Your Question",
                placeholder="Type your question, e.g., 'What is Paracetamol used for?'",
                lines=2
            )

            with gr.Row():
                submit = gr.Button("Send 🚀", variant="primary", scale=2)
                clear = gr.Button("Clear 🗑️", variant="secondary", scale=1)

        # Right (narrower) column: sampling controls and example prompts.
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")

            # Positional Slider args are (minimum, maximum, value, step).
            temperature = gr.Slider(0.1, 1.5, 0.7, 0.1, label="Temperature")
            max_tokens = gr.Slider(32, 256, 100, 8, label="Max Tokens")
            top_p = gr.Slider(0.1, 1.0, 0.9, 0.05, label="Top-p")

            gr.Markdown("""
            ### 📝 Try These:
            - What is Paracetamol used for?
            - Tell me about Metformin
            - Uses of Ibuprofen
            - What is Amoxicillin?
            """)

    def respond(message, chat_history, temp, max_tok, top_p_val):
        # Ignore empty or whitespace-only submissions; history is unchanged.
        if not message.strip():
            return chat_history, ""
        bot_response = generate_response(message, chat_history, temp, max_tok, top_p_val)
        chat_history.append((message, bot_response))
        # Second return value clears the input textbox after sending.
        return chat_history, ""

    # Both the Send button and pressing Enter in the textbox submit the query.
    submit.click(respond, [msg, chatbot, temperature, max_tokens, top_p], [chatbot, msg])
    msg.submit(respond, [msg, chatbot, temperature, max_tokens, top_p], [chatbot, msg])
    # Clear resets the chat display by pushing None into the chatbot component.
    clear.click(lambda: None, None, chatbot)

    # Footer with model provenance.
    gr.Markdown("""
    ---
    **Model Info:** TinyLlama-1.1B-Chat fine-tuned with LoRA on 500 medicine examples
    """)

demo.launch()
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
torch
|
| 3 |
+
transformers>=4.35.0
|
| 4 |
+
accelerate
|
| 5 |
+
peft
|
| 6 |
+
bitsandbytes
|
| 7 |
+
sentencepiece
|
| 8 |
+
protobuf
|