File size: 5,229 Bytes
62d15f6 be2572b 62d15f6 be2572b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 |
import os
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
# Use Hugging Face token (set in Space settings)
# Read-only lookup: returns None when HF_TOKEN is unset, which
# from_pretrained treats as anonymous access (fine for public models).
token = os.environ.get("HF_TOKEN")
model_id = "ibm-granite/granite-3.3-2b-instruct"
# Network/disk I/O: downloads the tokenizer and model weights on first run.
# NOTE(review): this file contains a second, near-duplicate copy of the whole
# app further down, which re-imports and reloads this same model — the later
# definitions shadow these ones. Consider keeping only one copy.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
model = AutoModelForCausalLM.from_pretrained(model_id, token=token)
def generate_response(prompt):
    """Run the loaded model on *prompt* and return the decoded completion.

    Uses the module-level ``tokenizer``/``model``; generation is greedy
    (no sampling) and capped at 200 new tokens.
    """
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    generated = model.generate(**encoded, max_new_tokens=200)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
def disease_prediction(symptoms):
    """Ask the model which conditions could explain *symptoms*."""
    question = f"Patient has symptoms: {symptoms}. What are the possible conditions?"
    return generate_response(question)
def treatment_plan(condition):
    """Ask the model for a treatment suggestion for *condition*."""
    question = f"Suggest treatment for {condition}"
    return generate_response(question)
def health_analytics(vitals):
    """Ask the model to analyze the free-text *vitals* readings."""
    question = f"Analyze vitals: {vitals}"
    return generate_response(question)
def patient_chat(query):
    """Forward a free-form patient *query* to the model unchanged."""
    return generate_response(query)
# Assemble the four-tab Gradio interface and start the app server.
with gr.Blocks() as demo:
    gr.Markdown("# π₯ HealthAI")
    with gr.Tab("Disease Prediction"):
        symptoms_box = gr.Textbox(label="Symptoms")
        prediction_box = gr.Textbox()
        gr.Button("Predict").click(disease_prediction, inputs=symptoms_box, outputs=prediction_box)
    with gr.Tab("Treatment Plan"):
        condition_box = gr.Textbox(label="Condition")
        plan_box = gr.Textbox()
        gr.Button("Get Plan").click(treatment_plan, inputs=condition_box, outputs=plan_box)
    with gr.Tab("Health Analytics"):
        vitals_box = gr.Textbox(label="Vitals")
        insights_box = gr.Textbox()
        gr.Button("Analyze").click(health_analytics, inputs=vitals_box, outputs=insights_box)
    with gr.Tab("Patient Chat"):
        question_box = gr.Textbox(label="Ask a question")
        answer_box = gr.Textbox()
        gr.Button("Ask").click(patient_chat, inputs=question_box, outputs=answer_box)
demo.launch()
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Read the Hugging Face access token from the environment (set it in the
# Space settings or your shell); it is None for anonymous access.
# BUG FIX: this previously did os.environ['HF_TOKEN'] = 'HF_TOKEN', which
# overwrote any real token with the literal placeholder string 'HF_TOKEN'
# and broke authenticated model downloads. Never hard-code tokens in source.
model_id = "ibm-granite/granite-3.3-2b-instruct"
token = os.getenv("HF_TOKEN")

# Network/disk I/O: downloads tokenizer and weights on first run.
# float32 keeps the model CPU-friendly for a free Space.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32, token=token)
# Core generation function
def generate_response(prompt):
    """Generate and decode a model reply to *prompt*.

    Sampling is enabled (``do_sample=True``), so repeated calls with the
    same prompt may return different text; output is capped at 200 new
    tokens. Relies on the module-level ``tokenizer`` and ``model``.
    """
    batch = tokenizer(prompt, return_tensors="pt").to(model.device)
    token_ids = model.generate(**batch, max_new_tokens=200, do_sample=True)
    return tokenizer.decode(token_ids[0], skip_special_tokens=True)
# Four feature functions
def disease_prediction(symptoms):
    """Build a differential-diagnosis prompt from *symptoms* and query the model."""
    question = f"Patient has symptoms: {symptoms}. What could be the possible conditions?"
    return generate_response(question)
def treatment_plan(condition):
    """Request a full treatment plan (meds, lifestyle, follow-up) for *condition*."""
    question = f"What is the treatment plan for {condition}? Include medications, lifestyle changes, and follow-up."
    return generate_response(question)
def health_analytics(vitals):
    """Ask the model to interpret the free-text health data in *vitals*."""
    question = f"Analyze this health data and give insights: {vitals}"
    return generate_response(question)
def patient_chat(query):
    """Wrap *query* as a medical question and send it to the model."""
    question = f"Medical Question: {query}"
    return generate_response(question)
# Custom CSS injected into the Gradio page via gr.Blocks(css=custom_css).
# NOTE: this is a runtime string consumed by the browser — edits here change
# the rendered look (fonts, colors, rounded inputs/buttons, tab background),
# not Python behavior.
custom_css = """
body {
font-family: 'Segoe UI', sans-serif;
background-color: #f8f9fa;
}
h1, h2 {
color: #114B5F;
font-weight: bold;
}
.gradio-container {
padding: 20px !important;
}
textarea {
border-radius: 10px !important;
border: 1px solid #ccc !important;
}
button {
background-color: #114B5F !important;
color: white !important;
border-radius: 8px !important;
padding: 10px 16px !important;
}
.tabitem {
background-color: #d6ecf3 !important;
padding: 10px;
border-radius: 10px;
}
"""
# Gradio Interface: themed four-tab UI wired to the four feature functions,
# then launch the app server.
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("# π₯ HealthAI - Generative Healthcare Assistant")
    with gr.Tab("π§ Disease Prediction"):
        with gr.Column():
            symptoms_box = gr.Textbox(label="Enter your symptoms")
            conditions_box = gr.Textbox(label="Predicted Conditions")
            gr.Button("Predict").click(disease_prediction, inputs=symptoms_box, outputs=conditions_box)
    with gr.Tab("π Treatment Plans"):
        with gr.Column():
            condition_box = gr.Textbox(label="Enter diagnosed condition")
            plan_box = gr.Textbox(label="Recommended Treatment")
            gr.Button("Get Treatment Plan").click(treatment_plan, inputs=condition_box, outputs=plan_box)
    with gr.Tab("π Health Analytics"):
        with gr.Column():
            vitals_box = gr.Textbox(label="Enter vitals (e.g., heart rate: 80, BP: 120/80...)")
            insights_box = gr.Textbox(label="AI Insights")
            gr.Button("Analyze").click(health_analytics, inputs=vitals_box, outputs=insights_box)
    with gr.Tab("π¬ Patient Chat"):
        with gr.Column():
            question_box = gr.Textbox(label="Ask a health-related question")
            answer_box = gr.Textbox(label="Response")
            gr.Button("Ask").click(patient_chat, inputs=question_box, outputs=answer_box)
demo.launch()