# app.py — Streamlit demo for the ChatBotsTA/distilbert-tweet-classifier model
# (header reconstructed from Hugging Face page residue: "Update app.py", commit 9e63400)
import streamlit as st
from transformers import pipeline
import torch
# Streamlit page configuration — must run before any other st.* call.
_PAGE_SETTINGS = {
    "page_title": "Tweet Classifier",
    "page_icon": "🐦",
    "layout": "wide",
}
st.set_page_config(**_PAGE_SETTINGS)
# Inject page-wide CSS (header color, result card, confidence gradient bar,
# and the colored label badge) via a raw <style> block.
_CUSTOM_CSS = """
<style>
    .main-header {
        font-size: 3rem;
        color: #1DA1F2;
        text-align: center;
        margin-bottom: 2rem;
    }
    .result-box {
        background-color: #f0f2f6;
        padding: 2rem;
        border-radius: 10px;
        margin-top: 2rem;
    }
    .confidence-bar {
        height: 20px;
        background: linear-gradient(90deg, #ff4b4b 0%, #ffa500 50%, #00cc00 100%);
        border-radius: 10px;
        margin: 10px 0;
    }
    .label-badge {
        padding: 0.5rem 1rem;
        border-radius: 20px;
        font-weight: bold;
        margin: 0.2rem;
        display: inline-block;
    }
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
# Page heading and subtitle.
st.markdown(
    '<h1 class="main-header">🐦 Tweet Sentiment Classifier</h1>',
    unsafe_allow_html=True,
)
st.markdown("### Real-time AI-powered text classification")
# Initialize model
@st.cache_resource
def load_model():
    """Create (and cache across reruns) the tweet-classification pipeline.

    Downloads the fine-tuned model from the Hugging Face Hub and places it
    on GPU 0 when CUDA is available, otherwise on CPU.

    Returns:
        The transformers text-classification pipeline, or None when loading
        fails (the error is surfaced in the UI via st.error).
    """
    repo_id = "ChatBotsTA/distilbert-tweet-classifier"
    device_index = 0 if torch.cuda.is_available() else -1
    try:
        return pipeline(
            "text-classification",
            model=repo_id,
            tokenizer=repo_id,
            device=device_index,
        )
    except Exception as e:  # boundary: report any load failure to the user
        st.error(f"Error loading model: {e}")
        return None
# Load the cached classifier, showing a spinner while the Hub download /
# pipeline construction runs. FIX: the spinner emoji was mojibake
# ("πŸš€", UTF-8 bytes mis-decoded) — restored to the intended 🚀.
with st.spinner("🚀 Loading your fine-tuned model from Hugging Face..."):
    classifier = load_model()

# Hard-stop the app when the model could not be created; nothing below
# can work without it.
if classifier is None:
    st.error("Could not load the model. Please check if the model exists on Hugging Face.")
    st.stop()
# Per-label display metadata used by the results panel.
# FIX: the circle emojis were mojibake (UTF-8 bytes mis-decoded, e.g.
# "🟒" for the green circle) — restored to the intended characters.
label_colors = {"positive": "🟢", "negative": "🔴", "litigious": "🔵", "uncertainty": "🟡"}
# Short human-readable explanation shown under each prediction.
label_descriptions = {
    "positive": "Positive sentiment/content",
    "negative": "Negative sentiment",
    "litigious": "Legal/contractual content",
    "uncertainty": "Uncertain/ambiguous content",
}
# Background color of the label badge (matches the emoji color).
badge_colors = {"positive": "#4CAF50", "negative": "#F44336", "litigious": "#2196F3", "uncertainty": "#FFC107"}
# Input section
st.markdown("---")
st.markdown("## 📝 Enter Tweet Text to Analyze")


def _use_example(text: str) -> None:
    """Button on_click callback: pre-fill the text area on the next rerun."""
    st.session_state["tweet_input"] = text


# The text area is keyed so example buttons can populate it via session_state.
input_text = st.text_area(
    "Paste tweet text here:",
    height=150,
    placeholder="Enter text to classify (e.g., 'This product is amazing!', 'I hate this service', 'The court case was dismissed')",
    key="tweet_input",
)

# Examples
with st.expander("💡 Click for example texts"):
    st.write("**Examples to try:**")
    examples = [
        "This is an amazing product! I love it!",
        "I'm so frustrated with this service, terrible experience",
        "The court case was dismissed due to lack of evidence",
        "I'm not sure how I feel about this situation",
    ]
    # BUG FIX: the original did `input_text = example` inside the button
    # branch, but clicking a button triggers a rerun and that assignment is
    # lost — the text area never updated. An on_click callback runs before
    # the widgets are rebuilt, so writing session_state there does work.
    for example in examples:
        st.button(example, key=example, on_click=_use_example, args=(example,))
# Analyze button: run the classifier on the entered text and render the
# prediction with a colored badge and a confidence bar.
# FIX: emoji in UI strings were mojibake ("πŸ”", "πŸ“Š", "βšͺ", "πŸ“ˆ") —
# restored to 🔍 / 📊 / ⚪ / 📈.
if st.button("🔍 Analyze Tweet", type="primary", use_container_width=True):
    if input_text.strip():
        with st.spinner("Analyzing..."):
            try:
                # pipeline returns a list of {'label': ..., 'score': ...}
                result = classifier(input_text)[0]
                label = result['label']
                confidence = result['score']
                st.markdown("---")
                st.markdown("## 📊 Analysis Results")
                st.markdown('<div class="result-box">', unsafe_allow_html=True)
                col1, col2 = st.columns([1, 2])
                with col1:
                    # ⚪ is the fallback when the label is unknown
                    st.markdown(f"### {label_colors.get(label, '⚪')} **Prediction:**")
                    color = badge_colors.get(label, "#9E9E9E")
                    st.markdown(
                        f'<span class="label-badge" style="background-color: {color}; color: white;">{label.upper()}</span>',
                        unsafe_allow_html=True,
                    )
                with col2:
                    st.markdown(f"### 📈 **Confidence:** {confidence:.1%}")
                    # Bar width encodes the confidence (0–100%).
                    st.markdown(
                        f'<div class="confidence-bar" style="width: {confidence*100}%;"></div>',
                        unsafe_allow_html=True,
                    )
                st.markdown(f"**Description:** {label_descriptions.get(label, '')}")
                st.markdown('</div>', unsafe_allow_html=True)
            except Exception as e:  # boundary: show inference errors in the UI
                st.error(f"Error during prediction: {e}")
    else:
        st.warning("Please enter some text to analyze!")
# Model info section
st.markdown("---")
st.markdown("## ℹ️ About This Model")

# Info card content kept as one named constant (markdown, no HTML needed).
_MODEL_INFO = (
    "**Model Details:**\n"
    "- **Base Model**: DistilBERT-base-uncased\n"
    "- **Training**: Fine-tuned on 50,000 tweets\n"
    "- **Accuracy**: 96.4% on validation set\n"
    "- **Labels**: Positive, Negative, Litigious, Uncertainty\n"
    "- **Created By**: You! 🎯\n\n"
    "**How to use programmatically:**\n"
    "```python\n"
    "from transformers import pipeline\n"
    "classifier = pipeline('text-classification', \n"
    " model='ChatBotsTA/distilbert-tweet-classifier')\n"
    "result = classifier('Your text here')\n"
    "```"
)
st.info(_MODEL_INFO)
# Footer with a link back to the model card.
# FIX: the heart emoji was mojibake ("❀️") — restored to ❤️.
st.markdown("---")
st.markdown(
    '<div style="text-align: center"><p>Built with ❤️ using your fine-tuned model | '
    '<a href="https://huggingface.co/ChatBotsTA/distilbert-tweet-classifier">View on Hugging Face</a></p></div>',
    unsafe_allow_html=True,
)