# NOTE: recovered from a GitHub "Delete streamlit_app.py" diff view (101 lines removed);
# diff chrome and line-number noise stripped, code reconstructed below.
| 1 |
-
# streamlit_app.py
|
| 2 |
-
import streamlit as st
|
| 3 |
-
from PIL import Image
|
| 4 |
-
import torch
|
| 5 |
-
import torchvision.transforms as T
|
| 6 |
-
from transformers import ViTModel, T5ForConditionalGeneration, T5Tokenizer
|
| 7 |
-
|
| 8 |
-
# ───── Model & tokenizer settings ──────────
HF_MODEL_ID = "your-username/iu-chest-report-model"  # ← replace with your model repo

@st.cache_resource(show_spinner=False)
def load_models(model_id):
    """Load the ViT encoder, the fine-tuned T5 model and its tokenizer.

    Cached by Streamlit so the heavyweight downloads and device transfers
    happen only once per process, not on every script rerun.

    Returns a 4-tuple ``(device, vit, t5, tokenizer)``.
    """
    target = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = ViTModel.from_pretrained("google/vit-base-patch16-224").to(target)
    generator = T5ForConditionalGeneration.from_pretrained(model_id).to(target)
    text_tok = T5Tokenizer.from_pretrained(model_id)
    return target, encoder, generator, text_tok
| 18 |
-
|
| 19 |
-
# Materialise models once at import time (st.cache_resource dedupes reruns).
device, vit, t5, tokenizer = load_models(HF_MODEL_ID)

# ───── Transforms ──────────────────────────
# 224×224, tensorised, then normalised with mean=0.5 / std=0.5.
_preprocess_steps = [
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize(mean=0.5, std=0.5),
]
transform = T.Compose(_preprocess_steps)
|
| 27 |
-
|
| 28 |
-
# ───── Streamlit layout ────────────────────
st.set_page_config(page_title="Radiology Report Analysis", layout="wide")
st.markdown("<h1 style='text-align:center;'>Radiology Report Analysis</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align:center;'>Upload a chest X-ray (JPG or PNG) to let our AI analyze and provide a diagnosis.</p>", unsafe_allow_html=True)

# Session state drives a two-stage wizard: "upload" -> "report".
if "stage" not in st.session_state:
    st.session_state.stage = "upload"

# 1) Upload stage
if st.session_state.stage == "upload":
    uploader = st.file_uploader("", type=["png","jpg","jpeg"], label_visibility="collapsed")
    gen_button = st.button("Generate Report", disabled=(uploader is None))

    if uploader:
        st.image(uploader, width=400, caption=f"{uploader.name} ({uploader.size/1e6:.2f} MB)")
    if gen_button and uploader is not None:
        st.session_state.uploaded = uploader
        st.session_state.stage = "report"
        # BUGFIX: without an explicit rerun the report stage only appears on
        # the *next* interaction, because this run already rendered the
        # upload UI before the stage flag changed.
        st.rerun()

# 2) Report stage
elif st.session_state.stage == "report":
    img_file = st.session_state.uploaded
    img = Image.open(img_file).convert("RGB")

    # Run inference once per rerun; no_grad avoids building autograd graphs
    # (pure inference — outputs are unchanged, memory use drops).
    with st.spinner("Analyzing X-ray…"):
        with torch.no_grad():
            # 1) preprocess to a [1, 3, 224, 224] batch on the model device
            x = transform(img).unsqueeze(0).to(device)
            # 2) vision features
            vfeat = vit(pixel_values=x).pooler_output  # [1, H]
            # NOTE(review): vfeat is never passed to t5.generate() below, so
            # the generated text currently ignores the image entirely.  The
            # visual features need to be wired into generation (e.g. via
            # encoder_outputs or a learned projection into the prompt
            # embeddings) — TODO confirm against the fine-tuning code.
            # 3) prepare a dummy prompt - adapt if your model expects something else
            prompt = "report:"
            enc = tokenizer(prompt, return_tensors="pt").to(device)
            # 4) generate
            gen_ids = t5.generate(
                input_ids=enc.input_ids,
                attention_mask=enc.attention_mask,
                max_length=64,
                num_beams=4,
                no_repeat_ngram_size=2,
                early_stopping=True,
                length_penalty=1.2,
            )
        diagnosis = tokenizer.decode(gen_ids[0], skip_special_tokens=True)
        # Placeholder — NOT a real model probability.  Replace with a proper
        # proxy (e.g. mean token log-prob) if one is available.
        confidence = 94

    # Display results side by side: image left, report right.
    col1, col2 = st.columns([1,1])
    with col1:
        st.subheader("Your Uploaded X-ray")
        # NOTE: use_column_width is deprecated in recent Streamlit releases;
        # switch to use_container_width once the minimum version allows it.
        st.image(img, use_column_width=True)
        st.markdown(f"**File:** {img_file.name} \n**Size:** {img_file.size/1e6:.2f} MB")
    with col2:
        st.subheader("AI Diagnosis & Report")
        st.markdown(
            f"<div style='background:#e0f7fa;padding:12px;border-radius:6px;'>"
            f"<strong>Primary Diagnosis</strong><br>{diagnosis}</div>",
            unsafe_allow_html=True
        )
        st.markdown(f"**Confidence:** {confidence}%")

    if st.button("⬅️ Upload Another"):
        # Reset the wizard; pop() tolerates a missing key (del would raise
        # KeyError if "uploaded" had already been cleared).
        st.session_state.stage = "upload"
        st.session_state.pop("uploaded", None)
        # BUGFIX: rerun immediately so the upload UI replaces the report.
        st.rerun()

# ───── Footer ──────────────────────────────
st.markdown("""
<hr style='margin:2em 0;'>
<p style='font-size:0.8em;color:gray;text-align:center;'>
This app runs a fine-tuned Vision→Report model from Hugging Face Space.
</p>
""", unsafe_allow_html=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|