# NOTE(review): a fully commented-out early prototype of this app previously
# lived here. It had drifted out of sync with the live code below, and several
# of its lines had lost their leading '#' (stray text like raw header strings),
# which made this file unparseable. The dead code has been removed; recover it
# from version-control history if it is ever needed again.
import streamlit as st
from transformers import pipeline
# ---- Load both models ----
@st.cache_resource
def load_summarizers():
    """Load and cache the three summarization pipelines used by the app.

    Returns:
        tuple: ``(summarizer_ft, summarizer_ft2, summarizer)`` — the user's
        fine-tuned model, a public reference fine-tuned model, and the
        pretrained base model, in that order.
    """
    # Your fine-tuned SAMSum checkpoint.
    model_name_ft = "AIsumit123/flan-t5-base_samsum_best_ckpt"  # your fine-tuned
    summarizer_ft = pipeline("summarization", model=model_name_ft, tokenizer=model_name_ft)

    # Publicly available fine-tuned model, used as a comparison reference.
    model_name_ft2 = "philschmid/flan-t5-base-samsum"  # comparison fine-tuned
    summarizer_ft2 = pipeline("summarization", model=model_name_ft2, tokenizer=model_name_ft2)

    # Pretrained base model with no SAMSum fine-tuning.
    model_name = "google/flan-t5-base"  # pretrained
    summarizer = pipeline("summarization", model=model_name, tokenizer=model_name)

    return summarizer_ft, summarizer_ft2, summarizer


summarizer_ft, summarizer_ft2, summarizer = load_summarizers()
# ---- Streamlit Page Config ----
st.set_page_config(page_title="Conversation Summarizer", page_icon="🤖", layout="wide")

# ---- Custom CSS for Styling ----
# NOTE(review): the original CSS payload was stripped when this file was
# mangled; restore the project stylesheet between the triple quotes if one
# existed.
st.markdown("""
""", unsafe_allow_html=True)

# ---- Header Section ----
# NOTE(review): the original header HTML was stripped from these string
# literals (leaving them unterminated); a centered h1/h3 is a plausible
# reconstruction — confirm against the deployed app.
st.markdown(
    "<h1 style='text-align: center;'>🤖 Conversation Summarizer</h1>",
    unsafe_allow_html=True,
)
st.markdown(
    "<h3 style='text-align: center;'>✨ Compare Pretrained vs Fine-tuned FLAN-T5 Models ✨</h3>",
    unsafe_allow_html=True,
)
st.write("Paste your **conversation** below and instantly compare how fine-tuning changes summary quality.")
# ---- Example Conversations ----
# Canned dialogues the user can load into the input box. The first entry is a
# placeholder that maps to an empty string, i.e. "no example selected".
_BUSINESS_MEETING = "\n".join([
    "Alex: Are we ready for the client presentation tomorrow?",
    "Sarah: Almost. I just need to finalize the quarterly figures.",
    "Mike: The slides are done, but we should rehearse the demo.",
    "Alex: Let's meet at 3 PM today for a dry run.",
    "Sarah: I'll bring the updated reports.",
    "Mike: Perfect, I'll set up the conference room.",
])

_CASUAL_CHAT = "\n".join([
    "Tom: Hey, are you watching the game tonight?",
    "Lisa: Which one? The championship?",
    "Tom: Yeah, it starts at 8. Want to come over?",
    "Lisa: Sure! Should I bring anything?",
    "Tom: Just yourself! Maybe some snacks.",
    "Lisa: Awesome, see you at 7:30!",
])

_CUSTOMER_SUPPORT = "\n".join([
    "Agent: Thank you for calling support. How can I help?",
    "Customer: I can't login to my account.",
    "Agent: Are you getting an error message?",
    "Customer: It says 'invalid password' but I'm sure it's correct.",
    "Agent: Let me reset your password. Check your email for a link.",
    "Customer: Got it! Thanks for your help.",
])

example_conversations = {
    "Select an example...": "",
    "Business Meeting": _BUSINESS_MEETING,
    "Casual Chat": _CASUAL_CHAT,
    "Customer Support": _CUSTOMER_SUPPORT,
}
# ---- Text Input ----
# The selected example pre-fills the text area; the user can still edit it
# freely before summarizing.
selected_example = st.selectbox(
    "Choose an example conversation:",
    list(example_conversations.keys()),
)
input_text = st.text_area(
    "💬 Conversation Input:",
    value=example_conversations[selected_example],
    height=250,
    placeholder="Person A: Hi, how are you?\nPerson B: I'm good, just finished work...",
)
# ---- Sidebar ----
# Generation settings shared by all three models.
st.sidebar.header("⚙️ Summary Settings")
max_length = st.sidebar.slider("Max summary length", 30, 200, 100, step=10)
min_length = st.sidebar.slider("Min summary length", 10, 100, 30, step=5)
num_beams = st.sidebar.slider("Number of beams", 1, 8, 4, help="Higher values = better quality but slower")
# Fix: the expander's body lines had lost their indentation (IndentationError).
with st.sidebar.expander("Advanced Settings"):
    repetition_penalty = st.slider("Repetition penalty", 1.0, 2.0, 1.2, 0.1)
    length_penalty = st.slider("Length penalty", 0.5, 2.0, 1.0, 0.1)
# ---- Generate Button ----
def _summarize(pipe, text, *, max_length, min_length, num_beams,
               repetition_penalty, length_penalty):
    """Run one summarization pipeline with the shared generation settings.

    Args:
        pipe: a Hugging Face ``summarization`` pipeline.
        text: the conversation to summarize.
    Returns:
        str: the generated summary text.
    """
    return pipe(
        text,
        max_length=max_length,
        min_length=min_length,
        num_beams=num_beams,
        early_stopping=True,
        repetition_penalty=repetition_penalty,
        length_penalty=length_penalty,
    )[0]["summary_text"]


# NOTE(review): all nesting below had been flattened to column 0 and the HTML
# inside the f-strings stripped (unterminated literals); indentation is
# restored and the markup reconstructed — confirm card styling against the
# deployed app.
if st.button("✨ Generate Summaries", use_container_width=True):
    if input_text.strip():
        with st.spinner("🧠 Models are thinking..."):
            try:
                # One kwargs dict so all three models use identical settings.
                gen_kwargs = dict(
                    max_length=max_length,
                    min_length=min_length,
                    num_beams=num_beams,
                    repetition_penalty=repetition_penalty,
                    length_penalty=length_penalty,
                )
                base_summary = _summarize(summarizer, input_text, **gen_kwargs)
                ft_summary = _summarize(summarizer_ft, input_text, **gen_kwargs)
                ft_summary2 = _summarize(summarizer_ft2, input_text, **gen_kwargs)

                st.success("✅ Summaries Generated!")

                # ---- Display Results ----
                st.markdown("<hr>", unsafe_allow_html=True)

                # Stats row: word counts for the input and each summary.
                col1, col2, col3, col4 = st.columns(4)
                with col1:
                    st.markdown(
                        f"<div style='text-align:center'><b>Original Length</b><br>"
                        f"{len(input_text.split())} words</div>",
                        unsafe_allow_html=True,
                    )
                with col2:
                    st.markdown(
                        f"<div style='text-align:center'><b>Base Summary</b><br>"
                        f"{len(base_summary.split())} words</div>",
                        unsafe_allow_html=True,
                    )
                with col3:
                    st.markdown(
                        f"<div style='text-align:center'><b>Your Model</b><br>"
                        f"{len(ft_summary.split())} words</div>",
                        unsafe_allow_html=True,
                    )
                with col4:
                    st.markdown(
                        f"<div style='text-align:center'><b>Reference Model</b><br>"
                        f"{len(ft_summary2.split())} words</div>",
                        unsafe_allow_html=True,
                    )

                # Summary cards: one column per model.
                st.markdown("<hr>", unsafe_allow_html=True)
                col1, col2, col3 = st.columns(3)
                with col1:
                    st.markdown(
                        f"<div><h4>🧠 Base Model</h4><p>{base_summary}</p></div>",
                        unsafe_allow_html=True,
                    )
                with col2:
                    st.markdown(
                        f"<div><h4>🚀 Your Fine-tuned</h4><p>{ft_summary}</p></div>",
                        unsafe_allow_html=True,
                    )
                with col3:
                    st.markdown(
                        f"<div><h4>🔬 Reference Model</h4><p>{ft_summary2}</p></div>",
                        unsafe_allow_html=True,
                    )
                st.markdown("<hr>", unsafe_allow_html=True)
            except Exception as e:
                # Surface model/generation failures to the user instead of a
                # raw Streamlit traceback.
                st.error(f"❌ Error generating summaries: {str(e)}")
    else:
        st.warning("⚠️ Please enter a conversation to summarize.")
# ---- Footer ----
st.markdown("---")
# NOTE(review): the wrapping HTML was stripped from this literal (leaving it
# unterminated); a centered div is a plausible reconstruction.
st.markdown(
    "<div style='text-align: center;'>"
    "Built with ❤️ using Streamlit & Hugging Face Transformers"
    "</div>",
    unsafe_allow_html=True
)