Spaces:
Sleeping
Sleeping
File size: 9,900 Bytes
ffd126f d643805 ffd126f 24db65a d643805 c400ab5 d643805 24db65a c400ab5 d643805 db58110 d643805 24db65a d643805 66d8f54 d643805 24db65a 66d8f54 24db65a 66d8f54 5376c03 66d8f54 24db65a d643805 66d8f54 d643805 66d8f54 d643805 66d8f54 5376c03 d643805 24db65a cf2d9c9 db58110 cf2d9c9 24db65a cf2d9c9 d643805 66d8f54 24db65a 66d8f54 d643805 cf2d9c9 d643805 24db65a d643805 24db65a d643805 66d8f54 d643805 24db65a 66d8f54 24db65a 66d8f54 24db65a 66d8f54 d643805 66d8f54 d643805 66d8f54 d643805 24db65a d643805 24db65a d643805 24db65a d643805 24db65a d643805 24db65a d643805 24db65a d643805 24db65a d643805 24db65a d643805 24db65a d643805 24db65a d643805 24db65a c890baf 24db65a c890baf d643805 66d8f54 24db65a 66d8f54 24db65a 66d8f54 24db65a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 |
import os
import re

import streamlit as st
import torch
from openai import OpenAI
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
# --- Page configuration ---
st.set_page_config(
    page_title="Sentiment Analyzer",
    page_icon="π",  # NOTE(review): icon looks mojibake-garbled — confirm intended emoji
    layout="wide"
)

# --- App Header ---
st.title("π Sentiment Analysis with AI Responses")
st.markdown("**5-Class Amazon Review Sentiment Analysis + AI-Generated Customer Support Responses**")
st.markdown("*Powered by GPT4.0 mini*")
st.markdown("---")

# --- API Key and Model Setup ---
# SECURITY FIX: a live GitHub personal-access token was hard-coded here in
# source. Read it from the environment instead (set GITHUB_TOKEN before
# launching the app); the previously committed token must be revoked.
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN", "")
@st.cache_resource
def load_llm_client():
    """Build the OpenAI-compatible client for the GitHub Models endpoint.

    Cached by Streamlit so the client is constructed once per process.
    Returns None (after surfacing an error in the UI) if construction fails.
    """
    try:
        client = OpenAI(
            api_key=GITHUB_TOKEN,
            base_url="https://models.inference.ai.azure.com/"
        )
    except Exception as exc:
        st.error(f"Failed to initialize LLM client: {str(exc)}")
        return None
    return client
@st.cache_resource
def load_sentiment_model():
    """Load the fine-tuned 5-class Amazon-review sentiment pipeline.

    Falls back to a public 3-class Twitter RoBERTa model when the custom
    checkpoint cannot be loaded. Cached by Streamlit so the (expensive)
    download/load happens once per process.

    BUG FIX: the success message below was a string literal broken across two
    source lines (a syntax error) with mojibake emoji; literal rejoined and
    garbled warning emoji repaired.
    """
    try:
        model_name = "shivam-1706/distilbert-amazon-sentiment"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
        if model.config.num_labels == 5:
            st.success("✅ 5-class sentiment model loaded successfully!")
        else:
            st.warning(f"⚠️ Model has {model.config.num_labels} classes, expected 5")
        return pipeline(
            "text-classification",
            model=model,
            tokenizer=tokenizer,
            # NOTE: return_all_scores is deprecated in newer transformers
            # releases (top_k=None is the replacement); kept for compatibility
            # with the downstream list-of-dicts handling.
            return_all_scores=True,
            device=0 if torch.cuda.is_available() else -1
        )
    except Exception as e:
        st.error(f"Error loading custom model: {str(e)}")
        st.warning("⚠️ Using fallback model - this will only show 3 classes")
        return pipeline(
            "text-classification",
            model="cardiffnlp/twitter-roberta-base-sentiment-latest",
            return_all_scores=True
        )
# Load both backends once at startup (each is cached by Streamlit).
# BUG FIX: the success message was a string literal broken across two source
# lines (a syntax error) with a mojibake emoji; rejoined into one literal.
with st.spinner("Loading DistilBERT model..."):
    sentiment_pipeline = load_sentiment_model()

llm_client = load_llm_client()
if llm_client:
    st.success("✅ GPT4.0 mini connected automatically!")
# --- Helper functions ---
def fix_problematic_words(text):
    """Rewrite rare intensity words into common synonym phrases.

    The sentiment model presumably under-represents rare superlatives like
    "phenomenal"; each is expanded (case-insensitively, whole-word only) into
    several frequent words of the same polarity before classification.

    BUG FIX: the original applied ``re.sub`` once per dictionary entry, so a
    replacement phrase containing another key (e.g. 'fabulous' expands to a
    phrase containing 'fantastic') was itself re-expanded on a later pass,
    compounding the dilution. A single compiled-pattern pass replaces each
    original word exactly once.

    Args:
        text: raw review text.
    Returns:
        The text with problematic words expanded; other content untouched.
    """
    fixes = {
        'phenomenal': 'absolutely amazing excellent outstanding incredible wonderful',
        'fabulous': 'excellent amazing wonderful fantastic great',
        'superb': 'excellent outstanding amazing great wonderful',
        'marvelous': 'amazing excellent wonderful fantastic outstanding',
        'spectacular': 'outstanding excellent amazing incredible wonderful',
        'divine': 'absolutely perfect excellent amazing wonderful',
        'sublime': 'excellent amazing wonderful outstanding perfect',
        'magnificent': 'excellent outstanding amazing wonderful great',
        'exceptional': 'outstanding excellent amazing wonderful great',
        'extraordinary': 'amazing excellent outstanding incredible wonderful',
        'brilliant': 'excellent amazing outstanding great',
        'fantastic': 'excellent amazing wonderful great',
        'incredible': 'amazing excellent outstanding wonderful',
        'outstanding': 'excellent amazing great wonderful',
        'remarkable': 'excellent amazing outstanding wonderful',
        'horrendous': 'absolutely terrible awful horrible disgusting',
        'dreadful': 'terrible awful horrible bad disgusting',
        'atrocious': 'extremely terrible awful horrible disgusting',
        'abysmal': 'terrible awful horrible bad disgusting',
        'deplorable': 'terrible awful horrible bad',
        'appalling': 'extremely terrible awful horrible',
        'horrid': 'terrible awful horrible bad',
        'ghastly': 'terrible awful horrible disgusting',
        'abominable': 'terrible awful horrible disgusting',
        'despicable': 'terrible awful horrible bad'
    }
    # One combined alternation, matched whole-word and case-insensitively;
    # the callback looks the matched word up by its lowercased form.
    pattern = re.compile(r'\b(?:' + '|'.join(fixes) + r')\b', re.IGNORECASE)
    return pattern.sub(lambda m: fixes[m.group(0).lower()], text)
def predict_sentiment_enhanced(text):
    """Classify *text* into one of five sentiment classes.

    Runs the (module-global) ``sentiment_pipeline`` on the preprocessed text.
    Handles both the custom 5-class model (LABEL_0..LABEL_4) and the 3-class
    fallback model (NEGATIVE/NEUTRAL/POSITIVE).

    Returns:
        (sentiment_label, confidence, all_scores) where all_scores maps every
        one of the five class names to a probability.

    BUG FIX: the blank-input branch returned confidence 0.20 while its own
    score dict put 'Average' at 0.50 — the confidence now matches the top
    score. The error path now also returns a full 5-class score dict so
    downstream display code sees a uniform shape.
    """
    if not text.strip():
        # Neutral default for blank input; confidence equals the top score.
        return "Average", 0.50, {
            'Very Bad': 0.10, 'Bad': 0.15, 'Average': 0.50, 'Good': 0.15, 'Very Good': 0.10
        }
    try:
        results = sentiment_pipeline(fix_problematic_words(text))
        # Pipelines configured with return_all_scores wrap single inputs in a list.
        if isinstance(results[0], list):
            results = results[0]
        label_map = {
            'LABEL_0': 'Very Bad',
            'LABEL_1': 'Bad',
            'LABEL_2': 'Average',
            'LABEL_3': 'Good',
            'LABEL_4': 'Very Good'
        }
        if len(results) == 5:
            best_result = max(results, key=lambda x: x['score'])
            sentiment = label_map.get(best_result['label'], best_result['label'])
            confidence = best_result['score']
            all_scores = {label_map.get(r['label'], r['label']): r['score'] for r in results}
            return sentiment, confidence, all_scores
        else:
            # 3-class fallback model: map its labels onto the 5-class scheme
            # and zero-fill the classes it cannot produce.
            fallback_map = {'NEGATIVE': 'Bad', 'NEUTRAL': 'Average', 'POSITIVE': 'Good'}
            best_result = max(results, key=lambda x: x['score'])
            sentiment = fallback_map.get(best_result['label'], 'Average')
            confidence = best_result['score']
            all_scores = {'Very Bad': 0.0, 'Bad': 0.0, 'Average': 0.0, 'Good': 0.0, 'Very Good': 0.0}
            all_scores[sentiment] = confidence
            return sentiment, confidence, all_scores
    except Exception as e:
        st.error(f"Error in prediction: {str(e)}")
        return "Average", 0.5, {
            'Very Bad': 0.0, 'Bad': 0.0, 'Average': 0.5, 'Good': 0.0, 'Very Good': 0.0
        }
def generate_llm_response(review_text, sentiment):
    """Generate a customer-support reply to *review_text* via the LLM.

    Selects a prompt template keyed by the predicted *sentiment* class and
    calls the (module-global) ``llm_client``. On any API failure, returns a
    canned per-sentiment reply instead of raising.

    NOTE(review): an unknown *sentiment* makes ``prompts.get`` return None,
    which makes the API call raise and lands in the canned-fallback path —
    it works, but only by accident of the exception handler.
    """
    # Client is None when load_llm_client() failed at startup.
    if not llm_client:
        return "β GitHub Models API not available. Please check your API key."
    # One prompt template per sentiment class; the review text is inlined.
    prompts = {
        'Very Bad': f"""You are a professional customer service manager. A customer left this review: "{review_text}"...
Their sentiment is very negative. Provide a response that:
1. Shows empathy
2. Offers resolution (refund/replacement)
3. Provides next steps
Response:""",
        'Bad': f"""You are a customer service rep. Customer said: "{review_text}"...
Their experience was bad. Acknowledge & offer help. Response:""",
        'Average': f"""Respond to a mixed review: "{review_text}"...
Thank, acknowledge, offer help, ask for suggestions. Response:""",
        'Good': f"""Customer left a positive review: "{review_text}"...
Thank them and reinforce quality. Response:""",
        'Very Good': f"""Delighted customer wrote: "{review_text}"...
Celebrate, thank, and invite sharing. Response:"""
    }
    try:
        response = llm_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": prompts.get(sentiment)}],
            max_tokens=150,
            temperature=0.7
        )
        return response.choices[0].message.content.strip()
    except Exception:
        # Deliberate best-effort: any API error falls back to a canned reply.
        fallback = {
            'Very Bad': "Weβre truly sorry for your experience. Please reach out so we can make this right.",
            'Bad': "Thanks for your feedback. Weβd like to help resolve your concern.",
            'Average': "Thanks for your honest review. We're listening and happy to improve.",
            'Good': "Thank you for your kind words! We appreciate your support.",
            'Very Good': "Weβre thrilled you loved it! Thanks for your amazing review."
        }
        return fallback.get(sentiment, "Thank you for your feedback!")
# --- App Interface ---
# Two-column layout: review input (wide, left) and analysis results (right).
# NOTE(review): original indentation was lost in extraction; structure below
# is the reconstructed conventional layout — confirm against the live app.
col1, col2 = st.columns([2, 1])

with col1:
    st.subheader("π Enter Product Review")
    review_text = st.text_area(
        "Type or paste a product review here:",
        height=150,
        placeholder="Example: This product broke after just two days of use. Very disappointed with the quality and delivery was delayed too."
    )
    analyze_button = st.button("π Analyze & Generate Response", type="primary")

# Guard: button pressed with an empty/whitespace-only review.
if analyze_button and not review_text.strip():
    st.warning("β οΈ Please enter a review to analyze!")
# Main analysis path: classify the review, render the 5-class breakdown in
# the right column, then generate and show the AI reply in the left column.
if analyze_button and review_text.strip():
    sentiment, confidence, all_scores = predict_sentiment_enhanced(review_text)

    with col2:
        st.subheader("π Analysis Results")
        # Streamlit markdown color names for the :color[text] syntax.
        color_map = {
            'Very Good': 'green',
            'Good': 'blue',
            'Average': 'orange',
            'Bad': 'red',
            'Very Bad': 'violet'
        }
        # NOTE(review): emoji values below appear mojibake-garbled — confirm
        # the intended characters against the original source.
        emoji_map = {
            'Very Bad': 'π‘',
            'Bad': 'π',
            'Average': 'π',
            'Good': 'π',
            'Very Good': 'π€©'
        }
        color = color_map.get(sentiment, 'blue')
        emoji = emoji_map.get(sentiment, 'π')
        st.markdown(f"**Sentiment:** :{color}[{sentiment}] {emoji}")
        # Progress bar doubles as a confidence gauge (score is in [0, 1]).
        st.progress(confidence)
        st.caption(f"Confidence: {confidence:.2%}")
        st.subheader("π― All 5 Class Predictions")
        # Fixed ordering from most negative to most positive; missing classes
        # (3-class fallback model) default to 0.0.
        class_order = ['Very Bad', 'Bad', 'Average', 'Good', 'Very Good']
        for class_name in class_order:
            score = all_scores.get(class_name, 0.0)
            st.write(f"{emoji_map.get(class_name)} {class_name}: {score:.1%}")

    with col1:
        st.markdown("---")
        with st.spinner("Generating AI response..."):
            ai_response = generate_llm_response(review_text, sentiment)
        st.subheader("π€ AI Customer Support Response")
        st.info(ai_response)
        # Human-readable label for the response strategy used per class.
        strategies = {
            'Very Bad': "π Crisis Management: Immediate resolution",
            'Bad': "π§ Problem Resolution: Solutions & improvements",
            'Average': "βοΈ Balanced: Acknowledge & enhance",
            'Good': "π Appreciation: Maintain quality",
            'Very Good': "π Celebration: Encourage sharing"
        }
        st.caption(f"**Strategy:** {strategies.get(sentiment)}")
|