waleed-12's picture
Update src/streamlit_app.py
3004889 verified
raw
history blame
1.62 kB
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
# ---- Configuration ----
# Hugging Face Hub id of the fine-tuned FLAN-T5 summarization checkpoint.
MODEL_NAME = "AbdullahAlnemr1/flan-t5-summarizer"
# Run on GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ---- Load model and tokenizer ----
@st.cache_resource
def load_model():
    """Download (or reuse) the tokenizer and seq2seq model, cached across reruns.

    Returns:
        tuple: (tokenizer, model) with the model already moved to ``device``.
    """
    tok = AutoTokenizer.from_pretrained(MODEL_NAME)
    seq2seq = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float32)
    # ``Module.to`` returns the module itself, so chain it into the return.
    return tok, seq2seq.to(device)
tokenizer, model = load_model()

# ---- Streamlit App ----
st.title("Text Summarizer")
# Fixed user-facing typo: "summariy" -> "summaries".
st.write("Generate concise summaries.")

# ---- Input Area ----
article = st.text_area("Enter the article or passage to summarize:", height=250)

# ---- Parameters ----
max_input_len = 512    # tokens kept from the input; longer text is truncated
max_output_len = 150   # upper bound (in tokens) on the generated summary
# ---- Generate Summary ----
if st.button("Generate Summary"):
    if not article.strip():
        # Guard: nothing to summarize (empty or whitespace-only input).
        st.warning("Please enter some text to summarize.")
    else:
        with st.spinner("Generating summary..."):
            # Tokenize and truncate to the encoder's input budget, then move
            # the tensors to the same device as the model.
            inputs = tokenizer(
                article,
                return_tensors="pt",
                max_length=max_input_len,
                truncation=True,
            ).to(device)
            # Inference only: disable autograd so beam search does not build a
            # gradient graph (saves memory and time).
            with torch.no_grad():
                summary_ids = model.generate(
                    **inputs,
                    max_length=max_output_len,
                    num_beams=4,
                    length_penalty=2.0,
                    early_stopping=True,
                )
            summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

        # ---- Output ----
        st.subheader("Generated Summary:")
        st.write(summary)