# Streamlit front-end for the Twitter Sentiment Intelligence demo.
# (Deployed as a Hugging Face Space; the model artifact is pulled from the Hub.)
from __future__ import annotations
import json
import sys
from pathlib import Path
from typing import Dict
import pandas as pd
import streamlit as st
from huggingface_hub import hf_hub_download
import joblib
# -------------------------------------------------------------------------
# Page Configuration
# -------------------------------------------------------------------------
st.set_page_config(
    page_icon="💼",
    page_title="Twitter Sentiment Intelligence",
    layout="wide",
)

st.title("Twitter Sentiment Intelligence")
st.caption(
    "Streamlit front-end for the Deloitte-ready Twitter Sentiment "
    "Intelligence dashboard."
)
# -------------------------------------------------------------------------
# Application body
#
# Everything below runs inside one try/except so any startup failure
# (model download, deserialization, UI rendering) is surfaced in the app
# via st.error instead of an opaque blank page.
# -------------------------------------------------------------------------
try:
    # Location of the trained artifact on the Hugging Face Hub.
    MODEL_REPO = "vishnu-coder/twitter-sentiment-model"
    MODEL_FILENAME = "sentiment_pipeline.joblib"

    @st.cache_resource(show_spinner=False)
    def load_model():
        """Download and load the trained model from Hugging Face Hub.

        Cached with st.cache_resource so the download and deserialization
        happen once per server process, not on every Streamlit rerun.
        """
        model_path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME)
        return joblib.load(model_path)

    pipeline = load_model()

    # ---------------------------------------------------------------------
    # Helper functions for predictions
    # ---------------------------------------------------------------------
    def predict_sentiment(text: str) -> tuple[str, Dict[str, float]]:
        """Return the predicted label and a class -> probability mapping.

        Assumes the loaded pipeline exposes the scikit-learn classifier API
        (``predict_proba`` / ``classes_``) -- TODO confirm against artifact.
        """
        probs = pipeline.predict_proba([text])[0]
        classes = pipeline.classes_
        label = classes[probs.argmax()]
        return label, dict(zip(classes, probs))

    def format_probabilities(probabilities: Dict[str, float]):
        """Convert a class -> probability mapping to a styled table.

        Returns a pandas Styler (NOT a plain DataFrame -- the original
        ``-> pd.DataFrame`` annotation was incorrect): one row per class,
        sorted by descending confidence, formatted as percentages.
        """
        return (
            pd.DataFrame.from_dict(
                probabilities, orient="index", columns=["confidence"]
            )
            .sort_values("confidence", ascending=False)
            .style.format({"confidence": "{:.2%}"})
        )

    # ---------------------------------------------------------------------
    # Streamlit UI
    # ---------------------------------------------------------------------
    def main():
        """Render the sidebar snapshot and the real-time analysis form."""
        st.sidebar.header("📊 Model Snapshot")
        st.sidebar.write("**Source:**", MODEL_REPO)
        st.sidebar.success("✅ Loaded model from Hugging Face Hub")

        st.subheader("🔮 Real-Time Sentiment Analysis")
        user_input = st.text_area("Enter a tweet or comment:", height=150)
        if st.button("Analyze", type="primary"):
            if not user_input.strip():
                # Guard against empty / whitespace-only submissions.
                st.warning("⚠️ Please enter text to analyze.")
            else:
                label, probabilities = predict_sentiment(user_input)
                st.success(f"Predicted Sentiment: **{label.title()}**")
                st.dataframe(
                    format_probabilities(probabilities),
                    use_container_width=True,
                )

        st.markdown("---")
        st.caption("© 2025 Deloitte-aligned Sentiment Analytics Accelerator")

    if __name__ == "__main__":
        main()
except Exception as e:
    # Top-level boundary: report the failure in the UI rather than crashing.
    st.error(f"Startup failed: {e}")