Soundaryasos committed
Commit 2c64d59 · verified · Parent: f3883f4

Update app.py

Files changed (1):
  app.py +206 -442
app.py CHANGED
@@ -2,454 +2,218 @@ import streamlit as st
  import pandas as pd
  import numpy as np
  import matplotlib.pyplot as plt
- import seaborn as sns
- import nltk
- from textblob import TextBlob
- from wordcloud import WordCloud, STOPWORDS
- import plotly.express as px
- import plotly.graph_objects as go
- from plotly.subplots import make_subplots
  from datetime import datetime, timedelta
  from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
- from sklearn.linear_model import LinearRegression
- from sklearn.ensemble import RandomForestRegressor
- from sklearn.model_selection import train_test_split
- from sklearn.preprocessing import LabelEncoder, MinMaxScaler
- from sklearn.metrics import mean_squared_error, r2_score
- from io import BytesIO
- import base64
- import re
- import json
- import altair as alt
- import time
- import requests
- from PIL import Image
- from collections import Counter
- import spacy
- import emoji
  import warnings
  warnings.filterwarnings('ignore')

- # Initialize spaCy for advanced NLP
- try:
-     nlp = spacy.load("en_core_web_sm")
- except:
-     st.warning("Installing spaCy model. This might take a minute...")
-     import subprocess
-     subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], capture_output=True)
-     nlp = spacy.load("en_core_web_sm")
-
- # Ensure necessary NLTK data is available
- nltk.download('punkt', quiet=True)
- nltk.download('stopwords', quiet=True)
- nltk.download('wordnet', quiet=True)
- nltk.download('vader_lexicon', quiet=True)

  # Page Configuration
- st.set_page_config(
-     page_title="Sentiment Pulse | Advanced Sentiment Analyzer",
-     page_icon="🔮",
-     layout="wide",
-     initial_sidebar_state="expanded"
- )
-
- # Apply custom CSS for modern look
- st.markdown("""
-     <style>
-     /* Main theme colors */
-     :root {
-         --primary: #7B68EE;
-         --secondary: #00BFFF;
-         --background: #F8F9FA;
-         --text: #333333;
-         --accent: #FF69B4;
-     }
-
-     /* Base Styles */
-     .reportview-container {
-         background-color: var(--background);
-         color: var(--text);
-     }
-
-     .sidebar .sidebar-content {
-         background-image: linear-gradient(to bottom, var(--primary), var(--secondary));
-         color: white;
-     }
-
-     /* Card-like containers */
-     .card {
-         background-color: white;
-         border-radius: 10px;
-         padding: 20px;
-         box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
-         margin-bottom: 20px;
-     }
-
-     /* Header styling */
-     h1, h2, h3 {
-         color: var(--primary);
-         font-weight: 700;
-     }
-
-     /* Button styling */
-     .stButton>button {
-         background-color: var(--primary);
-         color: white;
-         border-radius: 8px;
-         border: none;
-         transition: all 0.3s;
-     }
-     .stButton>button:hover {
-         background-color: var(--secondary);
-         transform: translateY(-2px);
-         box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15);
-     }
-
-     /* Metric styling */
-     .metric-value {
-         font-size: 32px;
-         font-weight: 700;
-         color: var(--primary);
-     }
-
-     .metric-label {
-         font-size: 14px;
-         color: var(--text);
-         opacity: 0.7;
-     }
-
-     /* Divider */
-     .divider {
-         height: 3px;
-         background-image: linear-gradient(to right, var(--primary), var(--secondary));
-         margin: 20px 0;
-         border-radius: 3px;
-     }
-
-     /* Hide hamburger menu and footer */
-     #MainMenu {visibility: hidden;}
-     footer {visibility: hidden;}
-
-     /* Custom tab styling */
-     .stTabs [data-baseweb="tab-list"] {
-         gap: 8px;
-     }
-
-     .stTabs [data-baseweb="tab"] {
-         background-color: transparent;
-         border-radius: 4px 4px 0px 0px;
-         border: none;
-         color: var(--text);
-         padding: 10px 16px;
-     }
-
-     .stTabs [aria-selected="true"] {
-         background-color: white !important;
-         color: var(--primary) !important;
-         font-weight: bold;
-         border-top: 2px solid var(--primary);
-     }
-
-     /* Tooltip */
-     .tooltip {
-         position: relative;
-         display: inline-block;
-         border-bottom: 1px dotted black;
-     }
-
-     .tooltip .tooltiptext {
-         visibility: hidden;
-         width: 200px;
-         background-color: #555;
-         color: #fff;
-         text-align: center;
-         border-radius: 6px;
-         padding: 5px;
-         position: absolute;
-         z-index: 1;
-         bottom: 125%;
-         left: 50%;
-         margin-left: -100px;
-         opacity: 0;
-         transition: opacity 0.3s;
-     }
-
-     .tooltip:hover .tooltiptext {
-         visibility: visible;
-         opacity: 1;
-     }
-     </style>
- """, unsafe_allow_html=True)
-
- # ===== UTILITY FUNCTIONS =====
- def clean_text(text):
-     """Clean and preprocess text for analysis"""
-     if not isinstance(text, str):
-         return ""
-
-     # Convert to lowercase
-     text = text.lower()
-
-     # Remove URLs
-     text = re.sub(r'https?://\S+|www\.\S+', '', text)
-
-     # Remove mentions and hashtags for analysis
-     text = re.sub(r'@\w+|#\w+', '', text)
-
-     # Remove punctuation and special characters
-     text = re.sub(r'[^\w\s]', '', text)
-
-     # Remove extra whitespace
-     text = re.sub(r'\s+', ' ', text).strip()
-
-     return text
-
- def extract_hashtags(text):
-     """Extract hashtags from text"""
-     if not isinstance(text, str):
-         return []
-     return re.findall(r'#(\w+)', text)
-
- def extract_mentions(text):
-     """Extract mentions from text"""
-     if not isinstance(text, str):
-         return []
-     return re.findall(r'@(\w+)', text)
-
- def count_emojis(text):
-     """Count emojis in text"""
-     if not isinstance(text, str):
-         return 0
-     return len([c for c in text if c in emoji.EMOJI_DATA])
-
- def get_emoji_sentiment(text):
-     """Get sentiment of emojis in text"""
-     if not isinstance(text, str):
-         return 0
-
-     # Simple dictionary of emoji sentiment (expand as needed)
-     emoji_sentiment = {
-         '😊': 1, '😃': 1, '😄': 1, '😁': 1, '😍': 1,
-         '😢': -1, '😭': -1, '😡': -1, '😠': -1, '😞': -1
-     }
-
-     sentiment = 0
-     for char in text:
-         if char in emoji_sentiment:
-             sentiment += emoji_sentiment[char]
-
-     return sentiment
-
- def generate_wordcloud(text, mask=None, background_color='white'):
-     """Generate word cloud from text"""
-     if not text or not isinstance(text, str):
-         return None
-
-     stopwords = set(STOPWORDS)
-     # Add custom stopwords
-     custom_stopwords = {'the', 'and', 'to', 'of', 'a', 'in', 'is', 'that', 'it', 'was'}
-     stopwords.update(custom_stopwords)
-
-     wordcloud = WordCloud(
-         width=800,
-         height=400,
-         background_color=background_color,
-         stopwords=stopwords,
-         max_words=150,
-         colormap='viridis',
-         contour_width=3,
-         contour_color='steelblue',
-         collocations=False
-     ).generate(text)
-
-     return wordcloud
-
- def get_entity_analysis(text):
-     """Extract named entities from text using spaCy"""
-     if not text or not isinstance(text, str):
-         return {}
-
-     doc = nlp(text)
-     entities = {}
-
-     for ent in doc.ents:
-         if ent.label_ not in entities:
-             entities[ent.label_] = []
-         entities[ent.label_].append(ent.text)
-
-     return entities
-
- def extract_keywords(text, top_n=10):
-     """Extract keywords from text using spaCy"""
-     if not text or not isinstance(text, str):
-         return []
-
-     doc = nlp(text)
-     keywords = []
-
-     for token in doc:
-         if (not token.is_stop and
-                 not token.is_punct and
-                 token.pos_ in ('NOUN', 'PROPN', 'ADJ') and
-                 len(token.text) > 1):
-             keywords.append(token.text.lower())
-
-     # Count and get top keywords
-     keyword_counts = Counter(keywords)
-     return keyword_counts.most_common(top_n)
-
- def analyze_tone(text):
-     """Analyze the tone of text"""
-     if not text or not isinstance(text, str):
-         return "Neutral"
-
-     # Use TextBlob for sentiment
-     blob = TextBlob(text)
-     polarity = blob.sentiment.polarity
-     subjectivity = blob.sentiment.subjectivity
-
-     # Tone categories
-     if polarity > 0.5:
-         if subjectivity > 0.7:
-             return "Enthusiastic"
-         else:
-             return "Positive"
-     elif polarity > 0.1:
-         if subjectivity > 0.7:
-             return "Interested"
-         else:
-             return "Somewhat Positive"
-     elif polarity < -0.5:
-         if subjectivity > 0.7:
-             return "Angry"
          else:
-             return "Negative"
-     elif polarity < -0.1:
-         if subjectivity > 0.7:
-             return "Frustrated"
          else:
-             return "Somewhat Negative"
-     else:
-         if subjectivity > 0.7:
-             return "Uncertain"
-         else:
-             return "Neutral"
-
- def analyze_readability(text):
-     """Analyze text readability metrics"""
-     if not text or not isinstance(text, str):
-         return {}
-
-     # Word count
-     words = text.split()
-     word_count = len(words)
-
-     if word_count == 0:
-         return {
-             "word_count": 0,
-             "avg_word_length": 0,
-             "avg_sentence_length": 0,
-             "readability_score": 0,
-             "complexity": "N/A"
-         }
-
-     # Sentence count
-     sentences = nltk.sent_tokenize(text)
-     sentence_count = len(sentences)
-
-     # Average word length
-     avg_word_length = sum(len(word) for word in words) / word_count if word_count > 0 else 0
-
-     # Average sentence length
-     avg_sentence_length = word_count / sentence_count if sentence_count > 0 else 0
-
-     # Simplified readability score (based on avg word & sentence length)
-     readability_score = 206.835 - (1.015 * avg_sentence_length) - (84.6 * avg_word_length / 5)
-     readability_score = max(0, min(100, readability_score))
-
-     # Determine complexity
-     if readability_score > 90:
-         complexity = "Very Easy"
-     elif readability_score > 80:
-         complexity = "Easy"
-     elif readability_score > 70:
-         complexity = "Fairly Easy"
-     elif readability_score > 60:
-         complexity = "Standard"
-     elif readability_score > 50:
-         complexity = "Fairly Difficult"
-     elif readability_score > 30:
-         complexity = "Difficult"
-     else:
-         complexity = "Very Difficult"
-
-     return {
-         "word_count": word_count,
-         "avg_word_length": round(avg_word_length, 2),
-         "avg_sentence_length": round(avg_sentence_length, 2),
-         "readability_score": round(readability_score, 2),
-         "complexity": complexity
-     }
-
- def get_sentiment_color(score):
-     """Get color based on sentiment score"""
-     if score > 0.5:
-         return "#2E8B57"  # Strong positive: Sea Green
-     elif score > 0:
-         return "#90EE90"  # Positive: Light Green
-     elif score == 0:
-         return "#D3D3D3"  # Neutral: Light Gray
-     elif score > -0.5:
-         return "#FFA07A"  # Negative: Light Salmon
-     else:
-         return "#DC143C"  # Strong negative: Crimson
-
- def map_sentiment_to_emoji(score):
-     """Map sentiment score to emoji"""
-     if score > 0.75:
-         return "😍"
-     elif score > 0.5:
-         return "😁"
-     elif score > 0.25:
-         return "🙂"
-     elif score > 0:
-         return "😊"
-     elif score == 0:
-         return "😐"
-     elif score > -0.25:
-         return "😕"
-     elif score > -0.5:
-         return "😟"
-     elif score > -0.75:
-         return "😞"
-     else:
-         return "😡"
-
- def download_as_file(object_to_download, download_filename, button_text, pickle_it=False):
-     """
-     Generates a link to download the given object_to_download.
-
-     Args:
-         object_to_download: The object to be downloaded.
-         download_filename: Filename that the object will be saved as.
-         button_text: Text to display on the download button.
-         pickle_it: If True, pickle file.
-     """
-     if pickle_it:
-         try:
-             object_to_download = pickle.dumps(object_to_download)
-         except pickle.PicklingError:
-             return None
-
-     # Convert to bytes
-     if isinstance(object_to_download, bytes):
-         pass
-     elif isinstance(object_to_download, pd.DataFrame):
-         object_to_download = object_to_download.to_csv(index=False).encode()
-     # Add other data types as needed
-     else:
-         object_to_download = str(object_to_download).encode()
-
-     # Generate download button
-     b64 = base64.b64encode(object_to_download).decode()
-     button_uuid = str(hash(button_text))
  import pandas as pd
  import numpy as np
  import matplotlib.pyplot as plt
  from datetime import datetime, timedelta
+ from sklearn.preprocessing import MinMaxScaler
+ from sklearn.linear_model import LogisticRegression
+ import tensorflow as tf
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import LSTM, Dense, Dropout
+ from transformers import pipeline
  from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
+ import shap
+ import praw
+ from googleapiclient.discovery import build
  import warnings
  warnings.filterwarnings('ignore')

+ # Set random seeds
+ np.random.seed(42)
+ tf.random.set_seed(42)

  # Page Configuration
+ st.set_page_config(page_title="Sentiment Pulse", layout="wide")
+ st.markdown("<h1 style='text-align: center; color: #7B68EE;'>Sentiment Pulse: Multi-Platform Analysis</h1>", unsafe_allow_html=True)
+
+ # API Credentials (replace with your own)
+ REDDIT_CLIENT_ID = "S7pTXhj5JDFGDb3-_zrJEA"
+ REDDIT_CLIENT_SECRET = "QP3NYN4lrAKVLrBamzLGrpFywiVg8w"
+ REDDIT_USER_AGENT = "SoundaryaR_Bot/1.0"
+ YOUTUBE_API_KEY = "AIzaSyAChqXPaiNE9hKhApkgjgonzdgiCCOo"
+
+ # Initialize APIs
+ reddit = praw.Reddit(client_id=REDDIT_CLIENT_ID, client_secret=REDDIT_CLIENT_SECRET, user_agent=REDDIT_USER_AGENT)
+ youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)
+ bert_classifier = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
+ vader_analyzer = SentimentIntensityAnalyzer()
+
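Editor's note: the commit hardcodes live Reddit and YouTube credentials in the source. A minimal sketch of the usual alternative, assuming the same variable names and that the keys are exported as environment variables (a hypothetical setup, not part of this commit):

    import os

    # Hypothetical: read the same credentials from the environment instead of
    # committing them to the repository.
    REDDIT_CLIENT_ID = os.environ["REDDIT_CLIENT_ID"]
    REDDIT_CLIENT_SECRET = os.environ["REDDIT_CLIENT_SECRET"]
    REDDIT_USER_AGENT = os.environ.get("REDDIT_USER_AGENT", "SoundaryaR_Bot/1.0")
    YOUTUBE_API_KEY = os.environ["YOUTUBE_API_KEY"]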
+ # Load Twitter Dataset
+ @st.cache_data
+ def load_twitter_data():
+     df = pd.read_csv("twitter_dataset.csv", encoding='latin-1',
+                      names=['sentiment', 'id', 'date', 'query', 'user', 'text'])
+     df['date'] = pd.to_datetime(df['date'])
+     df['sentiment'] = df['sentiment'].map({0: 'negative', 4: 'positive'})
+     return df.sample(10000)
+
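Editor's note: if twitter_dataset.csv follows the Sentiment140 layout (the 0/4 labels and column order suggest it does), its dates look like "Mon Apr 06 22:19:45 PDT 2009", which a bare pd.to_datetime may fail to parse. A hedged sketch, assuming that format:

    import pandas as pd

    def parse_sentiment140_dates(dates: pd.Series) -> pd.Series:
        # Drop the timezone abbreviation (e.g. "PDT"), which strptime cannot
        # map to an offset reliably, then parse the remainder explicitly.
        no_tz = dates.str.replace(r"\s[A-Z]{3,4}\s", " ", regex=True)
        return pd.to_datetime(no_tz, format="%a %b %d %H:%M:%S %Y", errors="coerce")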
+ # Fetch Live Reddit Data
+ def fetch_reddit_data(keyword):
+     subreddit = reddit.subreddit("all")
+     posts = subreddit.search(keyword, limit=100)
+     data = []
+     for post in posts:
+         data.append({'date': datetime.fromtimestamp(post.created_utc), 'text': post.title + " " + post.selftext})
+     return pd.DataFrame(data)
+
+ # Fetch Live YouTube Data
+ def fetch_youtube_data(keyword):
+     request = youtube.search().list(q=keyword, part="snippet", maxResults=50, type="video")
+     response = request.execute()
+     data = []
+     for item in response['items']:
+         title = item['snippet']['title']
+         description = item['snippet']['description']
+         published_at = datetime.strptime(item['snippet']['publishedAt'], "%Y-%m-%dT%H:%M:%SZ")
+         data.append({'date': published_at, 'text': title + " " + description})
+     return pd.DataFrame(data)
+
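Editor's note: both fetchers assume the network call succeeds; a failed Reddit or YouTube request would crash the app before the empty-DataFrame check below. One defensive sketch (the wrapper is an assumption, not part of the commit):

    def safe_fetch(fetcher, keyword):
        # Fall back to an empty frame so the valid_platforms filter below
        # simply skips a platform whose API call failed.
        try:
            return fetcher(keyword)
        except Exception as exc:
            st.warning(f"Fetch failed for {fetcher.__name__}: {exc}")
            return pd.DataFrame(columns=['date', 'text'])

    # e.g. reddit_df = safe_fetch(fetch_reddit_data, keyword)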
+ # Sentiment Analysis Functions
+ def get_bert_sentiment(text):
+     try:
+         result = bert_classifier(text[:512])[0]
+         return 1 if result['label'] == 'POSITIVE' else 0, result['score']
+     except:
+         return 0, 0.5
+
+ def get_vader_sentiment(text):
+     score = vader_analyzer.polarity_scores(text)['compound']
+     return 1 if score > 0 else 0, score
+
+ def combined_sentiment(text):
+     bert_label, bert_score = get_bert_sentiment(text)
+     vader_label, vader_score = get_vader_sentiment(text)
+     avg_score = (bert_score + abs(vader_score)) / 2
+     return 1 if avg_score > 0.5 else 0, avg_score
+
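Editor's note: combined_sentiment averages the BERT confidence with the absolute VADER compound, so a confidently negative text can still land above 0.5 and be labeled positive. A sign-aware variant (an alternative sketch, not what this commit ships):

    def combined_sentiment_signed(text):
        bert_label, bert_conf = get_bert_sentiment(text)
        _, vader_compound = get_vader_sentiment(text)
        # Give the BERT confidence a sign so both scores live in [-1, 1].
        bert_polarity = bert_conf if bert_label == 1 else -bert_conf
        avg = (bert_polarity + vader_compound) / 2
        return (1 if avg > 0 else 0), avg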
+ # Sidebar for Keyword Input
+ st.sidebar.title("Keyword Search")
+ keyword = st.sidebar.text_input("Enter a keyword (e.g., 'happy')", value="happy")
+
+ # Process Data
+ twitter_df = load_twitter_data()
+ twitter_filtered = twitter_df[twitter_df['text'].str.contains(keyword, case=False, na=False)]
+ reddit_df = fetch_reddit_data(keyword)
+ youtube_df = fetch_youtube_data(keyword)
+
+ # Check Validity
+ platforms = {'Twitter': twitter_filtered, 'Reddit': reddit_df, 'YouTube': youtube_df}
+ valid_platforms = {k: v for k, v in platforms.items() if not v.empty}
+
+ if not valid_platforms:
+     st.error(f"No matching data found for '{keyword}' on Twitter, Reddit, or YouTube. Try another keyword.")
  else:
+     for platform, df in valid_platforms.items():
+         st.subheader(f"{platform} Analysis for '{keyword}'")
+         if platform == 'Twitter':
+             st.write(f"{platform} Dataset Preview:", df[['text', 'date']].head())
          else:
+             st.write(f"{platform} Live Data Preview:", df.head())
+
+         # Sentiment Analysis
+         with st.spinner(f"Analyzing {platform} sentiments..."):
+             df['bert_sentiment'], df['bert_score'] = zip(*df['text'].apply(get_bert_sentiment))
+             df['vader_sentiment'], df['vader_score'] = zip(*df['text'].apply(get_vader_sentiment))
+             df['combined_sentiment'], df['combined_score'] = zip(*df['text'].apply(combined_sentiment))
+         st.write(f"{platform} Sentiment Results:", df[['text', 'combined_sentiment', 'combined_score']].head())
+
+         # Time-Series Preparation
+         daily_sentiment = df.groupby(df['date'].dt.date)['combined_score'].mean().reset_index()
+         daily_sentiment['date'] = pd.to_datetime(daily_sentiment['date'])
+         daily_sentiment['tweet_count'] = df.groupby(df['date'].dt.date).size().values
+
+         if len(daily_sentiment) < 8:
+             st.warning(f"Not enough {platform} data for '{keyword}' to predict 30 days.")
+             fig, ax = plt.subplots(figsize=(10, 6))
+             ax.plot(daily_sentiment['date'], daily_sentiment['combined_score'], 'g-', label='Historical Sentiment')
+             ax.set_xlabel('Date')
+             ax.set_ylabel('Sentiment Score')
+             ax.set_title(f"{platform} Historical Sentiment for '{keyword}'")
+             ax.legend()
+             st.pyplot(fig)
          else:
+             scaler = MinMaxScaler()
+             daily_sentiment['scaled_score'] = scaler.fit_transform(daily_sentiment[['combined_score']])
+
+             # LSTM Sequences
+             def create_sequences(data, seq_length):
+                 X, y = [], []
+                 for i in range(len(data) - seq_length):
+                     X.append(data[i:i + seq_length])
+                     y.append(data[i + seq_length])
+                 return np.array(X), np.array(y)
+
+             seq_length = 7
+             X, y = create_sequences(daily_sentiment['scaled_score'].values, seq_length)
+             X = X.reshape((X.shape[0], X.shape[1], 1))
+
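Editor's note: a quick sanity check of the windowing. With a toy series and seq_length=3, each row of X is a sliding window and y is the value that follows it (toy numbers, for illustration only):

    import numpy as np

    toy = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
    X_toy, y_toy = create_sequences(toy, 3)
    # X_toy -> [[0.1 0.2 0.3], [0.2 0.3 0.4]]
    # y_toy -> [0.4 0.5]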
+             # Train LSTM
+             model = Sequential([
+                 LSTM(50, return_sequences=True, input_shape=(seq_length, 1)),
+                 Dropout(0.2),
+                 LSTM(25),
+                 Dropout(0.2),
+                 Dense(1, activation='sigmoid')
+             ])
+             model.compile(optimizer='adam', loss='mse')
+             model.fit(X, y, epochs=10, batch_size=32, validation_split=0.2, verbose=0)
+
+             # Predict 30 Days
+             last_sequence = daily_sentiment['scaled_score'][-seq_length:].values.reshape((1, seq_length, 1))
+             predictions = []
+             for _ in range(30):
+                 pred = model.predict(last_sequence, verbose=0)
+                 predictions.append(pred[0][0])
+                 last_sequence = np.roll(last_sequence, -1)
+                 last_sequence[0, -1, 0] = pred[0][0]
+             predictions = scaler.inverse_transform(np.array(predictions).reshape(-1, 1)).flatten()
+
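Editor's note: the 30-day loop is autoregressive, feeding each prediction back into the input window. The np.roll step works because the window has shape (1, seq_length, 1), so rolling the flattened array by -1 shifts the whole window one step left (toy shapes below):

    import numpy as np

    window = np.array([[[0.1], [0.2], [0.3]]])  # shape (1, 3, 1)
    window = np.roll(window, -1)                # oldest value wraps to the end
    window[0, -1, 0] = 0.4                      # overwrite it with the new prediction
    print(window.flatten())                     # [0.2 0.3 0.4]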
+             # Logistic Regression
+             X_lr = np.column_stack((daily_sentiment['scaled_score'], daily_sentiment['tweet_count']))
+             y_lr = (daily_sentiment['combined_score'] > 0.5).astype(int)
+             lr_model = LogisticRegression()
+             lr_model.fit(X_lr, y_lr)
+
+             future_dates = [daily_sentiment['date'].iloc[-1] + timedelta(days=i) for i in range(1, 31)]
+             X_future = np.column_stack((predictions, [daily_sentiment['tweet_count'].mean()] * 30))
+             lr_predictions = lr_model.predict_proba(X_future)[:, 1]
+
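Editor's note: lr_model is fit on MinMax-scaled scores, but `predictions` were inverse-transformed back to the raw scale before being stacked into X_future, so the classifier sees features on a different scale at prediction time. A hedged fix, reusing the fitted scaler:

    # Keep the LSTM forecasts in the same scaled space the classifier saw.
    scaled_preds = scaler.transform(predictions.reshape(-1, 1)).flatten()
    X_future = np.column_stack((scaled_preds,
                                [daily_sentiment['tweet_count'].mean()] * 30))
    lr_predictions = lr_model.predict_proba(X_future)[:, 1]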
+             # SHAP Explainability
+             st.subheader(f"{platform} SHAP Explainability")
+             explainer_lr = shap.LinearExplainer(lr_model, X_lr)
+             shap_values_lr = explainer_lr.shap_values(X_lr)
+             fig_lr, ax = plt.subplots()
+             shap.summary_plot(shap_values_lr, X_lr, feature_names=['Sentiment Score', 'Count'], show=False)
+             st.pyplot(fig_lr)
+
+             def lstm_predict(inputs):
+                 inputs = inputs.reshape((inputs.shape[0], seq_length, 1))
+                 return model.predict(inputs, verbose=0)
+
+             explainer_lstm = shap.KernelExplainer(lstm_predict, X[:50])
+             shap_values_lstm = explainer_lstm.shap_values(X[:50], nsamples=100)
+             fig_lstm, ax = plt.subplots()
+             shap.summary_plot(shap_values_lstm, X[:50], plot_type="bar", show=False)
+             st.pyplot(fig_lstm)
+
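Editor's note: shap.KernelExplainer expects 2-D (samples, features) background data, while X[:50] here is 3-D (samples, seq_length, 1). Since lstm_predict already restores the 3-D shape, flattening the windows first is the usual workaround (a sketch, assuming the same names):

    # Hand SHAP 2-D windows; lstm_predict rebuilds the (n, seq_length, 1) shape.
    X_flat = X[:50].reshape(-1, seq_length)
    explainer_lstm = shap.KernelExplainer(lstm_predict, X_flat)
    shap_values_lstm = explainer_lstm.shap_values(X_flat, nsamples=100)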
+             # Visualization
+             st.subheader(f"{platform} 30-Day Sentiment Prediction")
+             results_df = pd.DataFrame({
+                 'Date': future_dates,
+                 'Predicted Sentiment': predictions,
+                 'Positive Probability': lr_predictions
+             })
+             fig, ax1 = plt.subplots(figsize=(10, 6))
+             ax1.plot(daily_sentiment['date'], daily_sentiment['combined_score'], 'g-', label='Historical Sentiment')
+             ax1.plot(results_df['Date'], results_df['Predicted Sentiment'], 'b-', label='Predicted Sentiment')
+             ax1.set_xlabel('Date')
+             ax1.set_ylabel('Sentiment Score', color='b')
+             ax2 = ax1.twinx()
+             ax2.plot(results_df['Date'], results_df['Positive Probability'], 'r-', label='Positive Probability')
+             ax2.set_ylabel('Positive Probability', color='r')
+             fig.legend(loc='upper left', bbox_to_anchor=(0.1, 0.9))
+             plt.title(f"{platform} Sentiment Forecast for '{keyword}'")
+             st.pyplot(fig)
+
+ # Sidebar Instructions
+ st.sidebar.write("1. Ensure 'twitter_dataset.csv' is in the folder.")
+ st.sidebar.write("2. Enter a keyword to analyze the Twitter dataset plus live Reddit/YouTube data.")
+ st.sidebar.write("3. Run: `streamlit run app.py`")