# app.py — Gradio Space: generate synthetic doctor's progress notes with
# Gemma-3n, convert them to SOAP format, and export train/test files.
import os
import pandas as pd
import torch
import gradio as gr
from transformers import (
    AutoProcessor,
    AutoTokenizer,
    AutoModelForImageTextToText
)
from sklearn.model_selection import train_test_split

# Hugging Face access token, injected via the Space's Settings → Secrets.
# Required because the Gemma model repository is gated; fail fast at import
# time rather than at first click if it is missing.
HF_TOKEN = os.environ.get("HF_TOKEN")
if not HF_TOKEN:
    raise RuntimeError(
        "Missing HF_TOKEN in env vars – add it under Settings → Secrets"
    )

# Gated multimodal Gemma-3n instruction-tuned checkpoint used below.
MODEL_ID = "google/gemma-3n-e2b-it"

# Load processor & tokenizer at top level for fast startup
# (the heavy model itself is loaded lazily inside generate_and_export).
processor = AutoProcessor.from_pretrained(
    MODEL_ID, trust_remote_code=True, token=HF_TOKEN
)
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_ID, trust_remote_code=True, token=HF_TOKEN
)
def generate_and_export(n_notes: int = 100):
    """
    On button click: load the full model, generate ``n_notes`` synthetic
    doctor's notes, split them 70/30 into train/test, run SOAP inference
    on both splits, save the files, and return download paths.

    Parameters
    ----------
    n_notes : int, optional
        Number of synthetic progress notes to generate (default 100 —
        matches the original hard-coded behavior).

    Returns
    -------
    tuple[str, str, str]
        (status message, path to outputs/inference.tsv,
         path to outputs/eval.csv)
    """
    # Load the heavy model lazily so the Gradio UI starts fast.
    model = AutoModelForImageTextToText.from_pretrained(
        MODEL_ID,
        trust_remote_code=True,
        token=HF_TOKEN,
        torch_dtype=torch.float16,
        device_map="auto"
    )
    device = next(model.parameters()).device

    def to_soap(text: str) -> str:
        """Run one chat-templated generation; return only the new text."""
        inputs = processor.apply_chat_template(
            [
                {"role": "system", "content": [{"type": "text", "text": "You are a medical AI assistant."}]},
                {"role": "user", "content": [{"type": "text", "text": text}]}
            ],
            add_generation_prompt=True,
            tokenize=True,
            return_tensors="pt",
            return_dict=True
        ).to(device)
        out = model.generate(
            **inputs,
            max_new_tokens=400,
            do_sample=True,
            top_p=0.95,
            temperature=0.1,
            pad_token_id=processor.tokenizer.eos_token_id,
            use_cache=False
        )
        # Slice off the prompt tokens so only newly generated text remains.
        prompt_len = inputs["input_ids"].shape[-1]
        return processor.batch_decode(
            out[:, prompt_len:], skip_special_tokens=True
        )[0].strip()

    try:
        # Generate n_notes synthetic docs and their SOAP "ground truths".
        docs, gts = [], []
        for i in range(1, n_notes + 1):
            doc = to_soap(
                "Generate a realistic, concise doctor's progress note for a single patient encounter."
            )
            docs.append(doc)
            gts.append(to_soap(doc))
            # Periodically release cached CUDA blocks to avoid OOM on long
            # runs; guard so CPU-only Spaces skip the call entirely.
            if i % 20 == 0 and torch.cuda.is_available():
                torch.cuda.empty_cache()

        # Split 70/30 (fixed seed → reproducible split across runs).
        df = pd.DataFrame({"doc_note": docs, "ground_truth_soap": gts})
        train_df, test_df = train_test_split(df, test_size=0.3, random_state=42)

        # Ensure the outputs directory exists before writing either file.
        os.makedirs("outputs", exist_ok=True)

        # Inference on the train split → outputs/inference.tsv
        train_preds = [to_soap(d) for d in train_df["doc_note"]]
        inf = train_df.reset_index(drop=True).copy()
        inf["id"] = inf.index + 1
        inf["predicted_soap"] = train_preds
        inf[["id", "ground_truth_soap", "predicted_soap"]].to_csv(
            "outputs/inference.tsv", sep="\t", index=False
        )

        # Inference on the test split → outputs/eval.csv
        test_preds = [to_soap(d) for d in test_df["doc_note"]]
        pd.DataFrame({
            "id": range(1, len(test_preds) + 1),
            "predicted_soap": test_preds
        }).to_csv("outputs/eval.csv", index=False)
    finally:
        # Release the model between clicks. Without this, every click of
        # the button loads (and leaks) another full fp16 copy of the
        # weights, and a mid-run exception would strand the model as well.
        del model
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    return (
        "✅ Done!",
        "outputs/inference.tsv",
        "outputs/eval.csv"
    )
# Build the Gradio interface. Construction is cheap, so the UI comes up
# immediately; the heavy model is only loaded on the first button click.
with gr.Blocks() as demo:
    gr.Markdown("# Gemma‑3n SOAP Generator 🩺")

    run_button = gr.Button("Generate & Export 100 Notes")
    status_box = gr.Textbox(interactive=False, label="Status")
    inference_download = gr.File(label="Download inference.tsv")
    eval_download = gr.File(label="Download eval.csv")

    # Wire the button to the long-running export job; its three return
    # values fill the status box and the two download widgets in order.
    run_button.click(
        fn=generate_and_export,
        inputs=None,
        outputs=[status_box, inference_download, eval_download]
    )

if __name__ == "__main__":
    demo.launch()