# email_processor / src/streamlit_app.py
import streamlit as st
import torch
from transformers import pipeline
import time
# Page config
st.set_page_config(
page_title="πŸ“§ Email Reply Assistant",
page_icon="πŸ“§",
layout="wide",
initial_sidebar_state="expanded"
)
# Custom CSS
st.markdown("""
<style>
.main-header { font-size: 3rem; color: #1f77b4; text-align: center; margin-bottom: 2rem; }
.pipeline-card { background: #f8f9fa; padding: 1.5rem; border-radius: 10px; border-left: 5px solid #1f77b4; }
.status-success { color: #28a745; font-weight: bold; }
.status-loading { color: #ffc107; font-weight: bold; }
</style>
""", unsafe_allow_html=True)
@st.cache_resource
def load_pipelines():
    """Load the three Hugging Face pipelines used by the app.

    Cached with ``st.cache_resource`` so the models are downloaded and
    initialized only once per server process, not on every rerun.

    Returns:
        tuple: ``(classifier, generator, translator)`` pipeline objects.
    """
    # Resolve the device once instead of repeating the CUDA check per pipeline.
    device = 0 if torch.cuda.is_available() else -1
    with st.spinner("Loading AI models... This takes ~2 minutes"):
        # NOTE(review): distilbert-base-uncased is a *base* checkpoint; its
        # classification head is randomly initialized, so labels come out as
        # generic LABEL_0/LABEL_1 — consider a fine-tuned classifier model.
        classifier = pipeline(
            "text-classification",
            model="distilbert-base-uncased",
            device=device,
        )
        generator = pipeline(
            "text-generation",
            model="Kunal7370944861/Email-Writer-AI",
            device=device,
        )
        translator = pipeline(
            "translation",
            model="DDDSSS/translation_en-zh",
            device=device,
        )
    return classifier, generator, translator
# Load pipelines
# Load all pipelines eagerly at startup; abort the app on failure, since
# every downstream section depends on all three models being available.
try:
    classifier, generator, translator = load_pipelines()
    st.success("✅ All 3 AI pipelines loaded!")
except Exception as e:
    st.error(f"❌ Pipeline loading failed: {str(e)}")
    st.stop()  # halt script execution — nothing below can run without models
# Helper functions (same logic)
def classify_email(text: str, classifier):
    """Classify an email with the given pipeline.

    Only the first 512 characters are passed in, matching the model's
    input limit. Returns a ``(label, confidence)`` tuple.
    """
    truncated = text[:512]
    prediction = classifier(truncated)[0]
    return prediction["label"], float(prediction["score"])
def build_prompt(email_text: str, category: str) -> str:
    """Assemble the generation prompt for a classified customer email.

    Ends with the literal "Reply:" marker that ``generate_reply`` later
    splits on to separate the model's answer from the echoed prompt.
    """
    sections = [
        "You are a helpful customer service agent.",
        f"Email category: {category}",
        "Customer email:",
        email_text,
        "Write a polite, concise reply template.",
        "Reply:",
    ]
    return "\n".join(sections)
def generate_reply(prompt: str, generator):
    """Generate an English reply for *prompt* and strip the prompt echo.

    Bug fix: the original used ``max_length=300``, which counts the prompt
    tokens as well — a long email could leave little or no room for the
    reply (or trigger an error). ``max_new_tokens`` bounds only the
    generated continuation, independent of prompt length.
    """
    outputs = generator(
        prompt,
        max_new_tokens=200,  # budget for the reply only
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7,
        # Silence the "pad token not set" warning for GPT-style models.
        pad_token_id=generator.tokenizer.eos_token_id,
    )
    full_text = outputs[0]["generated_text"]
    # Causal LMs echo the prompt; keep only the text after the "Reply:" marker.
    if "Reply:" in full_text:
        return full_text.split("Reply:", 1)[-1].strip()
    # Fallback: strip the literal prompt if the marker is somehow missing.
    return full_text.replace(prompt, "").strip()
def translate_reply(text: str, translator):
    """Translate *text* to Chinese with the translation pipeline.

    Empty or whitespace-only input short-circuits to "" without calling
    the model.
    """
    if not text.strip():
        return ""
    translated = translator(text)
    return translated[0]["translation_text"].strip()
# Main title — uses the .main-header CSS class injected above.
st.markdown('<h1 class="main-header">🤖 Email Reply Assistant</h1>', unsafe_allow_html=True)
st.markdown("**Classify → Generate Reply → Translate to Chinese** • Powered by 3 Transformer models")
# Sidebar: static description of the three pipelines and a usage hint.
with st.sidebar:
    st.header("ℹ️ Pipeline Info")
    st.markdown("""
    **Pipeline 1**: `distilbert-base-uncased` → Email classification
    **Pipeline 2**: `Kunal7370944861/Email-Writer-AI` → Reply generation
    **Pipeline 3**: `DDDSSS/translation_en-zh` → Chinese translation
    **Status**: ✅ All models loaded
    """)
    st.markdown("---")
    st.info("👈 Paste email → Click Process → Get instant replies!")
# Main content: narrow input column (col1) and wide results column (col2).
col1, col2 = st.columns([1, 2])
with col1:
    st.header("📨 Input Email")
    # Keyed text area so the CLEAR button can reset it through session_state.
    email_text = st.text_area(
        "Paste your email here...",
        placeholder="Subject: Problem with order\n\nHello,\n\nMy package arrived damaged...",
        height=200,
        help="Paste complete email (subject + body)",
        key="email_input",
    )

    def _clear_email() -> None:
        """Button callback: reset the email widget before Streamlit reruns."""
        st.session_state["email_input"] = ""

    col_btn1, col_btn2 = st.columns(2)
    with col_btn1:
        process_btn = st.button("🚀 PROCESS EMAIL", type="primary", use_container_width=True)
    with col_btn2:
        # Bug fix: the original CLEAR button only called st.rerun(), which
        # does NOT reset a widget's value (widget state persists across
        # reruns). Clearing must happen in an on_click callback, which runs
        # before the automatic rerun triggered by the click.
        st.button("🧹 CLEAR", use_container_width=True, on_click=_clear_email)
with col2:
    # Run the full 3-stage flow only when the button was clicked AND there
    # is non-whitespace input; otherwise show the idle hint.
    if process_btn and email_text.strip():
        with st.spinner("🔄 Processing through 3 AI pipelines..."):
            # Pipeline 1: Classification — label feeds the prompt below.
            with st.container():
                st.markdown('<div class="pipeline-card"><h3>🔢 Pipeline 1: Classification</h3>', unsafe_allow_html=True)
                label, score = classify_email(email_text, classifier)
                col_a, col_b = st.columns(2)
                col_a.metric("Recommended Tag", label)
                col_b.metric("Confidence", f"{score:.1%}")
                st.markdown('</div>', unsafe_allow_html=True)
            # Pipeline 2: Reply Generation — prompt built from email + label.
            with st.container():
                st.markdown('<div class="pipeline-card"><h3>✉️ Pipeline 2: English Reply</h3>', unsafe_allow_html=True)
                prompt = build_prompt(email_text, label)
                reply_en = generate_reply(prompt, generator)
                # disabled=True makes the text area read-only display.
                st.text_area("English Reply Template", reply_en, height=150, disabled=True)
                st.markdown('</div>', unsafe_allow_html=True)
            # Pipeline 3: Translation — consumes the generated English reply.
            with st.container():
                st.markdown('<div class="pipeline-card"><h3>🇨🇳 Pipeline 3: Chinese Translation</h3>', unsafe_allow_html=True)
                reply_zh = translate_reply(reply_en, translator)
                st.text_area("中文回复模板", reply_zh, height=150, disabled=True)
                st.markdown('</div>', unsafe_allow_html=True)
            # Download buttons for both reply variants.
            st.markdown("---")
            col_c, col_d = st.columns(2)
            with col_c:
                st.download_button("📥 Download English", reply_en, "reply_en.txt")
            with col_d:
                st.download_button("📥 Download Chinese", reply_zh, "reply_zh.txt")
    else:
        st.info("👆 **Paste an email and click 'PROCESS EMAIL'** to see the magic!")
# Footer
st.markdown("---")
st.markdown("*Built with ❀️ using Streamlit + Transformers β€’ Deployed on Hugging Face Spaces*")