File size: 1,873 Bytes
c9d2fa0 ac9c332 d17b90e c9d2fa0 d17b90e c9d2fa0 d17b90e ac9c332 d17b90e a7c3e81 d17b90e c9d2fa0 d17b90e c9d2fa0 d17b90e c9d2fa0 d17b90e c9d2fa0 d17b90e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
# streamlit_app.py
#
# NOTE: the cache directories must be configured BEFORE importing
# transformers — the library (and huggingface_hub) resolve their cache
# paths when first imported, so setting these variables afterwards can
# leave them pointing at an unwritable default (e.g. ~/.cache) on
# Hugging Face Spaces.
import os

# -----------------------------
# Ensure cache dirs are writable in Spaces
# -----------------------------
os.environ.setdefault("HF_HOME", "/tmp/huggingface")
os.environ.setdefault("TRANSFORMERS_CACHE", "/tmp/huggingface/transformers")
os.environ.setdefault("HF_DATASETS_CACHE", "/tmp/huggingface/datasets")
os.environ.setdefault("HUGGINGFACE_HUB_CACHE", "/tmp/huggingface/hub")
os.environ.setdefault("XDG_CACHE_HOME", "/tmp/huggingface")

import streamlit as st
from transformers import pipeline

# Hardcoded model repo served by this demo.
MODEL_ID = "kirubel1738/biogpt-pubmedqa-finetuned"
@st.cache_resource
def load_model():
    """Build and cache the BioGPT text-generation pipeline.

    Runs on CPU (device=-1); st.cache_resource keeps a single
    pipeline instance alive across Streamlit reruns.
    """
    return pipeline("text-generation", model=MODEL_ID, device=-1)
# -----------------------------
# Streamlit UI
# -----------------------------
# st.set_page_config must be the first Streamlit page command in the
# script, so it runs BEFORE the model is loaded; this also lets the
# page chrome render while the model downloads on a cold start.
# (The original strings contained mojibake — "𧬠"/"β" — restored here
# to the intended UTF-8 characters.)
st.set_page_config(page_title="BioGPT – PubMedQA demo", layout="centered")

# Load once per server process.
generator = load_model()

st.title("🧬 BioGPT – PubMedQA Demo")
st.write("Ask a biomedical question and get an answer generated by BioGPT fine-tuned on PubMedQA.")

user_input = st.text_area("Enter your biomedical question:", height=150)

if st.button("Get Answer"):
    if user_input.strip():
        with st.spinner("Generating answer..."):
            try:
                result = generator(
                    user_input,
                    max_new_tokens=128,
                    do_sample=True,
                    temperature=0.7,
                )
                # The pipeline returns a list of candidate dicts; show
                # the first generated sequence.
                output_text = result[0]["generated_text"]
                st.success("Answer:")
                st.write(output_text)
            except Exception as e:
                # Surface generation failures in the UI instead of
                # crashing the app run.
                st.error(f"Generation failed: {e}")
    else:
        st.warning("Please enter a question.")

st.markdown("---")
st.caption("Model: kirubel1738/biogpt-pubmedqa-finetuned | Runs on CPU")
|