File size: 5,410 Bytes
5bf556d 4e6c7b5 5bf556d 4e6c7b5 5bf556d 9e63400 5bf556d 4e6c7b5 5bf556d 4e6c7b5 5bf556d 4e6c7b5 daca736 5bf556d 4e6c7b5 5bf556d daca736 5bf556d daca736 5bf556d 4e6c7b5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 |
import streamlit as st
from transformers import pipeline
import torch
# Set page config — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Tweet Classifier",
    page_icon="🐦",  # restored from mojibake ("π¦" was the UTF-8 bird emoji mis-decoded)
    layout="wide",
)
# Custom CSS for better styling, injected once at startup.
_CUSTOM_CSS = """
<style>
    .main-header {
        font-size: 3rem;
        color: #1DA1F2;
        text-align: center;
        margin-bottom: 2rem;
    }
    .result-box {
        background-color: #f0f2f6;
        padding: 2rem;
        border-radius: 10px;
        margin-top: 2rem;
    }
    .confidence-bar {
        height: 20px;
        background: linear-gradient(90deg, #ff4b4b 0%, #ffa500 50%, #00cc00 100%);
        border-radius: 10px;
        margin: 10px 0;
    }
    .label-badge {
        padding: 0.5rem 1rem;
        border-radius: 20px;
        font-weight: bold;
        margin: 0.2rem;
        display: inline-block;
    }
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
# App title and tagline. Emoji restored from mojibake; dropped a stale
# "changed this line" marker comment left over from an earlier edit.
st.markdown('<h1 class="main-header">🐦 Tweet Sentiment Classifier</h1>', unsafe_allow_html=True)
st.markdown("### Real-time AI-powered text classification")
# Initialize model
@st.cache_resource
def load_model():
    """Load the fine-tuned tweet classifier from the Hugging Face Hub.

    Returns the text-classification pipeline on success, or None on
    failure (the error is surfaced in the UI via st.error). Cached by
    st.cache_resource so the download happens once per server process.
    """
    try:
        repo_id = "ChatBotsTA/distilbert-tweet-classifier"
        # Use GPU 0 when CUDA is available, otherwise run on CPU (-1).
        return pipeline(
            "text-classification",
            model=repo_id,
            tokenizer=repo_id,
            device=0 if torch.cuda.is_available() else -1,
        )
    except Exception as exc:
        st.error(f"Error loading model: {exc}")
        return None
# Load the (cached) classifier and halt the whole app if it is unavailable —
# everything below depends on `classifier`.
with st.spinner("🔄 Loading your fine-tuned model from Hugging Face..."):  # emoji restored from mojibake — TODO confirm original glyph
    classifier = load_model()

if classifier is None:
    st.error("Could not load the model. Please check if the model exists on Hugging Face.")
    st.stop()
# Label info: per-label emoji marker, human-readable description, and badge
# color used in the results panel. All three dicts are keyed by the
# lower-case label names the classifier emits. Circle emoji restored from
# mojibake (UTF-8 bytes previously mis-decoded as ISO-8859-7).
label_colors = {"positive": "🟢", "negative": "🔴", "litigious": "🔵", "uncertainty": "🟡"}
label_descriptions = {
    "positive": "Positive sentiment/content",
    "negative": "Negative sentiment",
    "litigious": "Legal/contractual content",
    "uncertainty": "Uncertain/ambiguous content",
}
badge_colors = {"positive": "#4CAF50", "negative": "#F44336", "litigious": "#2196F3", "uncertainty": "#FFC107"}
# Input section
st.markdown("---")
st.markdown("## 📝 Enter Tweet Text to Analyze")


def _use_example(text: str) -> None:
    """Button callback: prefill the tweet text area on the next rerun.

    Callbacks run before any widget is instantiated, which is the only
    point at which a widget's session_state key may be written.
    """
    st.session_state["tweet_input"] = text


input_text = st.text_area(
    "Paste tweet text here:",
    height=150,
    placeholder="Enter text to classify (e.g., 'This product is amazing!', 'I hate this service', 'The court case was dismissed')",
    key="tweet_input",
)

# Examples
with st.expander("💡 Click for example texts"):
    st.write("**Examples to try:**")
    examples = [
        "This is an amazing product! I love it!",
        "I'm so frustrated with this service, terrible experience",
        "The court case was dismissed due to lack of evidence",
        "I'm not sure how I feel about this situation",
    ]
    for example in examples:
        # BUG FIX: the original did `input_text = example` inside the button
        # branch, which had no effect — Streamlit reruns the script on click
        # and the local was immediately rebuilt from the (empty) text_area.
        # Routing through an on_click callback + the widget key actually
        # populates the text area.
        st.button(example, key=example, on_click=_use_example, args=(example,))
# Analyze button: classify the text and render prediction + confidence.
if st.button("🔍 Analyze Tweet", type="primary", use_container_width=True):
    if not input_text.strip():
        st.warning("Please enter some text to analyze!")
    else:
        with st.spinner("Analyzing..."):
            try:
                # pipeline() returns a list with one dict per input text:
                # {"label": <str>, "score": <float in [0, 1]>}.
                result = classifier(input_text)[0]
                # Normalize case: some pipelines emit upper-case labels
                # (e.g. "POSITIVE"), while the lookup dicts above are keyed
                # lower-case. Harmless when the label is already lower-case.
                label = result["label"].lower()
                confidence = result["score"]

                st.markdown("---")
                st.markdown("## 📊 Analysis Results")
                st.markdown('<div class="result-box">', unsafe_allow_html=True)

                col1, col2 = st.columns([1, 2])
                with col1:
                    st.markdown(f"### {label_colors.get(label, '⚪')} **Prediction:**")
                    color = badge_colors.get(label, "#9E9E9E")  # grey fallback for unknown labels
                    st.markdown(
                        f'<span class="label-badge" style="background-color: {color}; color: white;">{label.upper()}</span>',
                        unsafe_allow_html=True,
                    )
                with col2:
                    st.markdown(f"### 📈 **Confidence:** {confidence:.1%}")
                    # Bar width encodes confidence as a percentage of the column.
                    st.markdown(
                        f'<div class="confidence-bar" style="width: {confidence*100}%;"></div>',
                        unsafe_allow_html=True,
                    )
                    st.markdown(f"**Description:** {label_descriptions.get(label, '')}")

                st.markdown('</div>', unsafe_allow_html=True)
            except Exception as e:
                st.error(f"Error during prediction: {e}")
# Model info section
st.markdown("---")
st.markdown("## ℹ️ About This Model")
# Concatenated single-line strings are used deliberately here (instead of a
# triple-quoted literal) so st.info renders without stray indentation.
st.info("**Model Details:**\n"
        "- **Base Model**: DistilBERT-base-uncased\n"
        "- **Training**: Fine-tuned on 50,000 tweets\n"
        "- **Accuracy**: 96.4% on validation set\n"
        "- **Labels**: Positive, Negative, Litigious, Uncertainty\n"
        "- **Created By**: You! 🎯\n\n"
        "**How to use programmatically:**\n"
        "```python\n"
        "from transformers import pipeline\n"
        "classifier = pipeline('text-classification', \n"
        "    model='ChatBotsTA/distilbert-tweet-classifier')\n"
        "result = classifier('Your text here')\n"
        "```")
# Footer — heart emoji restored from mojibake ("β€οΈ" was ❤️ mis-decoded).
st.markdown("---")
st.markdown(
    '<div style="text-align: center"><p>Built with ❤️ using your fine-tuned model | '
    '<a href="https://huggingface.co/ChatBotsTA/distilbert-tweet-classifier">View on Hugging Face</a></p></div>',
    unsafe_allow_html=True,
)