waleed-12 committed on
Commit
3472bbd
·
verified ·
1 Parent(s): 134a9d1

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +46 -20
src/streamlit_app.py CHANGED
"""Streamlit front-end for a fine-tuned FLAN-T5 text summarizer."""
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Hugging Face Hub id of the fine-tuned summarization checkpoint.
MODEL_NAME = "AbdullahAlnemr1/flan-t5-summarizer"

# Tokenizer and seq2seq weights are fetched once at module import
# (re-runs on every Streamlit rerun — no caching in this version).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)

st.title("Text Summarizer(Encoder-Decoder)")

# User inputs: the text to condense and a cap on generated tokens.
input_text = st.text_area("Enter text to summarize:", height=200)
max_new_tokens = st.slider("Max summary length", min_value=20, max_value=200, value=100)

if st.button("Generate Summary"):
    if not input_text.strip():
        st.warning("Please enter some text to summarize.")
    else:
        # Encode the input, run beam-search generation, decode back to text.
        encoded = tokenizer(input_text, return_tensors="pt", truncation=True)
        generated = model.generate(
            encoded["input_ids"],
            max_new_tokens=max_new_tokens,
            num_beams=4,
            early_stopping=True,
        )
        summary = tokenizer.decode(generated[0], skip_special_tokens=True)
        st.subheader("Summary:")
        st.write(summary)
 
"""Streamlit front-end for a fine-tuned FLAN-T5 encoder–decoder summarizer."""
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# ---- Configuration ----
MODEL_NAME = "AbdullahAlnemr1/flan-t5-summarizer"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# UPPER_SNAKE_CASE to match MODEL_NAME's constant convention.
MAX_INPUT_LEN = 512    # encoder-side truncation length, in tokens
MAX_OUTPUT_LEN = 150   # upper bound on *generated* summary tokens


# ---- Load model and tokenizer ----
@st.cache_resource
def load_model():
    """Download and cache the tokenizer/model pair once per server process.

    Returns:
        tuple: (tokenizer, model) with the model moved to ``device``.
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float32)
    model.to(device)
    return tokenizer, model


tokenizer, model = load_model()

# ---- Streamlit App ----
st.title("Text Summarizer")
st.write("Generate concise summaries from long articles using a fine-tuned Encoder–Decoder model.")

# ---- Input Area ----
article = st.text_area("Enter the article or passage to summarize:", height=250)

# ---- Generate Summary ----
if st.button("Generate Summary"):
    if not article.strip():
        st.warning("Please enter some text to summarize.")
    else:
        with st.spinner("Generating summary..."):
            # Tokenize, truncating long articles to the encoder limit,
            # and move the batch to the same device as the model.
            inputs = tokenizer(
                article,
                return_tensors="pt",
                max_length=MAX_INPUT_LEN,
                truncation=True,
            ).to(device)

            # max_new_tokens (not max_length) so the bound unambiguously
            # counts generated tokens only, as recommended by the
            # transformers generate() documentation.
            summary_ids = model.generate(
                **inputs,
                max_new_tokens=MAX_OUTPUT_LEN,
                num_beams=4,
                length_penalty=2.0,
                early_stopping=True,
            )

            summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

            # ---- Output ----
            st.subheader("Generated Summary:")
            st.write(summary)

# ---- Footer ----
st.markdown("---")
st.markdown("Model powered by Transformers | Streamlit App by Ali Hamza")