"""Streamlit app: real-time tweet classification with a fine-tuned DistilBERT.

Loads the "ChatBotsTA/distilbert-tweet-classifier" pipeline from Hugging Face
(cached for the session) and classifies user-entered text into one of four
labels: positive, negative, litigious, uncertainty.
"""

import streamlit as st
import torch
from transformers import pipeline

# --- Page configuration --------------------------------------------------
st.set_page_config(
    page_title="Tweet Classifier",
    page_icon="đŸĻ",
    layout="wide",
)

# Custom CSS. NOTE(review): the original stylesheet content was lost when the
# file was mangled; restore the <style> rules here if custom styling is wanted.
st.markdown("<style></style>", unsafe_allow_html=True)

# --- App title -----------------------------------------------------------
# NOTE(review): the original wrapped the title in a styled <div> whose markup
# was garbled; a minimal centered header is reconstructed here.
st.markdown(
    '<div style="text-align: center;"><h1>đŸĻ Tweet Sentiment Classifier</h1></div>',
    unsafe_allow_html=True,
)
st.markdown("### Real-time AI-powered text classification")


@st.cache_resource
def load_model():
    """Load the fine-tuned text-classification pipeline (cached per session).

    Returns:
        The transformers pipeline, or None if loading fails (the error is
        surfaced in the UI via st.error).
    """
    try:
        model_name = "ChatBotsTA/distilbert-tweet-classifier"
        return pipeline(
            "text-classification",
            model=model_name,
            tokenizer=model_name,
            # First GPU when available, otherwise CPU (-1 is the CPU device).
            device=0 if torch.cuda.is_available() else -1,
        )
    except Exception as e:  # boundary: show any load failure in the UI
        st.error(f"Error loading model: {e}")
        return None


# --- Load model ----------------------------------------------------------
with st.spinner("🚀 Loading your fine-tuned model from Hugging Face..."):
    classifier = load_model()

if classifier is None:
    st.error("Could not load the model. Please check if the model exists on Hugging Face.")
    st.stop()

# --- Label metadata ------------------------------------------------------
label_colors = {"positive": "đŸŸĸ", "negative": "🔴", "litigious": "đŸ”ĩ", "uncertainty": "🟡"}
label_descriptions = {
    "positive": "Positive sentiment/content",
    "negative": "Negative sentiment",
    "litigious": "Legal/contractual content",
    "uncertainty": "Uncertain/ambiguous content",
}
badge_colors = {
    "positive": "#4CAF50",
    "negative": "#F44336",
    "litigious": "#2196F3",
    "uncertainty": "#FFC107",
}

# --- Input section -------------------------------------------------------
st.markdown("---")
st.markdown("## 📝 Enter Tweet Text to Analyze")

# Keyed text_area so the example buttons below can prefill it through
# st.session_state. (The original assigned `input_text = example` after the
# widget had rendered — that local assignment is discarded on the rerun a
# button click triggers, so the examples never appeared in the box.)
input_text = st.text_area(
    "Paste tweet text here:",
    height=150,
    placeholder=(
        "Enter text to classify (e.g., 'This product is amazing!', "
        "'I hate this service', 'The court case was dismissed')"
    ),
    key="input_text",
)


def _use_example(text: str) -> None:
    """Button callback: prefill the text area before the next script rerun."""
    # Callbacks run before the script re-executes, so writing the widget's
    # session-state key here is legal and takes effect on the rerun.
    st.session_state["input_text"] = text


# --- Example texts -------------------------------------------------------
with st.expander("💡 Click for example texts"):
    st.write("**Examples to try:**")
    examples = [
        "This is an amazing product! I love it!",
        "I'm so frustrated with this service, terrible experience",
        "The court case was dismissed due to lack of evidence",
        "I'm not sure how I feel about this situation",
    ]
    for example in examples:
        st.button(example, key=example, on_click=_use_example, args=(example,))

# --- Analyze button ------------------------------------------------------
if st.button("🔍 Analyze Tweet", type="primary", use_container_width=True):
    if input_text.strip():
        with st.spinner("Analyzing..."):
            try:
                # Pipeline returns a list of {'label': str, 'score': float};
                # take the top prediction.
                result = classifier(input_text)[0]
                label = result["label"]
                confidence = result["score"]

                st.markdown("---")
                st.markdown("## 📊 Analysis Results")

                col1, col2 = st.columns([1, 2])
                with col1:
                    st.markdown(f"### {label_colors.get(label, 'âšĒ')} **Prediction:**")
                    color = badge_colors.get(label, "#9E9E9E")
                    # NOTE(review): the original badge HTML was garbled;
                    # reconstructed as a colored inline pill.
                    st.markdown(
                        f'<span style="background-color: {color}; color: white; '
                        f'padding: 4px 12px; border-radius: 12px; font-weight: bold;">'
                        f"{label.upper()}</span>",
                        unsafe_allow_html=True,
                    )
                with col2:
                    st.markdown(f"### 📈 **Confidence:** {confidence:.1%}")
                    # NOTE(review): the original rendered a custom HTML
                    # confidence bar that was lost; a native progress bar is
                    # the closest equivalent.
                    st.progress(min(max(confidence, 0.0), 1.0))

                st.markdown(f"**Description:** {label_descriptions.get(label, '')}")
            except Exception as e:  # boundary: show prediction failures in the UI
                st.error(f"Error during prediction: {e}")
    else:
        st.warning("Please enter some text to analyze!")

# --- Model info section --------------------------------------------------
st.markdown("---")
st.markdown("## â„šī¸ About This Model")
st.info(
    "**Model Details:**\n"
    "- **Base Model**: DistilBERT-base-uncased\n"
    "- **Training**: Fine-tuned on 50,000 tweets\n"
    "- **Accuracy**: 96.4% on validation set\n"
    "- **Labels**: Positive, Negative, Litigious, Uncertainty\n"
    "- **Created By**: You! đŸŽ¯\n\n"
    "**How to use programmatically:**\n"
    "```python\n"
    "from transformers import pipeline\n"
    "classifier = pipeline('text-classification', \n"
    "                      model='ChatBotsTA/distilbert-tweet-classifier')\n"
    "result = classifier('Your text here')\n"
    "```"
)

# --- Footer --------------------------------------------------------------
# NOTE(review): the original footer <div> and link markup were garbled;
# reconstructed with the model's Hugging Face URL.
st.markdown(
    "---"
)
st.markdown(
    '<div style="text-align: center;">Built with â¤ī¸ using your fine-tuned model | '
    '<a href="https://huggingface.co/ChatBotsTA/distilbert-tweet-classifier">'
    "View on Hugging Face</a></div>",
    unsafe_allow_html=True,
)