Update app.py
app.py CHANGED
@@ -16,27 +16,22 @@ from googleapiclient.discovery import build
 import warnings
 warnings.filterwarnings('ignore')
 
-# Set random seeds
 np.random.seed(42)
 tf.random.set_seed(42)
 
-# Page Configuration
 st.set_page_config(page_title="Sentiment Pulse", layout="wide")
 st.markdown("<h1 style='text-align: center; color: #7B68EE;'>Sentiment Pulse: Multi-Platform Analysis</h1>", unsafe_allow_html=True)
 
-# API Credentials (replace with your own)
 REDDIT_CLIENT_ID = "S7pTXhj5JDFGDb3-_zrJEA"
 REDDIT_CLIENT_SECRET = "QP3NYN4lrAKVLrBamzLGrpFywiVg8w"
 REDDIT_USER_AGENT = "SoundaryaR_Bot/1.0"
 YOUTUBE_API_KEY = "AIzaSyAChqXPaiNE9hKhApkgjgonzdgiCCOo"
 
-# Initialize APIs
 reddit = praw.Reddit(client_id=REDDIT_CLIENT_ID, client_secret=REDDIT_CLIENT_SECRET, user_agent=REDDIT_USER_AGENT)
 youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)
 bert_classifier = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
 vader_analyzer = SentimentIntensityAnalyzer()
 
-# Load Twitter Dataset
 @st.cache_data
 def load_twitter_data():
     df = pd.read_csv("twitter_dataset.csv", encoding='latin-1',
@@ -45,28 +40,19 @@ def load_twitter_data():
     df['sentiment'] = df['sentiment'].map({0: 'negative', 4: 'positive'})
     return df.sample(10000)
 
-# Fetch Live Reddit Data
 def fetch_reddit_data(keyword):
     subreddit = reddit.subreddit("all")
     posts = subreddit.search(keyword, limit=100)
-    data = []
-    for post in posts:
-        data.append({'date': datetime.fromtimestamp(post.created_utc), 'text': post.title + " " + post.selftext})
-    return pd.DataFrame(data)
+    return pd.DataFrame([{'date': datetime.fromtimestamp(post.created_utc), 'text': post.title + " " + post.selftext} for post in posts])
 
-# Fetch Live YouTube Data
 def fetch_youtube_data(keyword):
     request = youtube.search().list(q=keyword, part="snippet", maxResults=50, type="video")
     response = request.execute()
-    data = []
-    for item in response['items']:
-        title = item['snippet']['title']
-        description = item['snippet']['description']
-        published_at = datetime.strptime(item['snippet']['publishedAt'], "%Y-%m-%dT%H:%M:%SZ")
-        data.append({'date': published_at, 'text': title + " " + description})
-    return pd.DataFrame(data)
-
-# Sentiment Analysis Functions
+    return pd.DataFrame([{
+        'date': datetime.strptime(item['snippet']['publishedAt'], "%Y-%m-%dT%H:%M:%SZ"),
+        'text': item['snippet']['title'] + " " + item['snippet']['description']
+    } for item in response['items']])
+
 def get_bert_sentiment(text):
     try:
         result = bert_classifier(text[:512])[0]
@@ -84,17 +70,14 @@ def combined_sentiment(text):
     avg_score = (bert_score + abs(vader_score)) / 2
     return 1 if avg_score > 0.5 else 0, avg_score
 
-# Sidebar for Keyword Input
 st.sidebar.title("Keyword Search")
 keyword = st.sidebar.text_input("Enter a keyword (e.g., 'happy')", value="happy")
 
-# Process Data
 twitter_df = load_twitter_data()
 twitter_filtered = twitter_df[twitter_df['text'].str.contains(keyword, case=False, na=False)]
 reddit_df = fetch_reddit_data(keyword)
 youtube_df = fetch_youtube_data(keyword)
 
-# Check Validity
 platforms = {'Twitter': twitter_filtered, 'Reddit': reddit_df, 'YouTube': youtube_df}
 valid_platforms = {k: v for k, v in platforms.items() if not v.empty}
 
@@ -103,117 +86,77 @@ if not valid_platforms:
 else:
     for platform, df in valid_platforms.items():
         st.subheader(f"{platform} Analysis for '{keyword}'")
-        if platform == 'Twitter':
-            st.write(f"{platform} Dataset Preview:", df[['text', 'date']].head())
-        else:
-            st.write(f"{platform} Live Data Preview:", df.head())
+        st.write(f"{platform} Data Preview:", df.head())
 
-        # Sentiment Analysis
         with st.spinner(f"Analyzing {platform} sentiments..."):
             df['bert_sentiment'], df['bert_score'] = zip(*df['text'].apply(get_bert_sentiment))
             df['vader_sentiment'], df['vader_score'] = zip(*df['text'].apply(get_vader_sentiment))
             df['combined_sentiment'], df['combined_score'] = zip(*df['text'].apply(combined_sentiment))
-        st.write(f"{platform} Sentiment Results:", df[['text', 'combined_sentiment', 'combined_score']].head())
 
-
+        st.write(df[['text', 'combined_sentiment', 'combined_score']].head())
+
         daily_sentiment = df.groupby(df['date'].dt.date)['combined_score'].mean().reset_index()
         daily_sentiment['date'] = pd.to_datetime(daily_sentiment['date'])
         daily_sentiment['tweet_count'] = df.groupby(df['date'].dt.date).size().values
 
         if len(daily_sentiment) < 8:
-            st.warning(f"Not enough {platform} data for
-            fig, ax = plt.subplots(
-            ax.plot(daily_sentiment['date'], daily_sentiment['combined_score'],
-            ax.set_xlabel('Date')
-            ax.set_ylabel('Sentiment Score')
-            ax.set_title(f"{platform} Historical Sentiment for '{keyword}'")
-            ax.legend()
+            st.warning(f"Not enough {platform} data for prediction.")
+            fig, ax = plt.subplots()
+            ax.plot(daily_sentiment['date'], daily_sentiment['combined_score'], label='Historical')
             st.pyplot(fig)
-            inputs = inputs.reshape((inputs.shape[0], seq_length, 1))
-            return model.predict(inputs, verbose=0)
-
-        explainer_lstm = shap.KernelExplainer(lstm_predict, X[:50])
-        shap_values_lstm = explainer_lstm.shap_values(X[:50], nsamples=100)
-        fig_lstm, ax = plt.subplots()
-        shap.summary_plot(shap_values_lstm, X[:50], plot_type="bar", show=False)
-        st.pyplot(fig_lstm)
-
-        # Visualization
-        st.subheader(f"{platform} 30-Day Sentiment Prediction")
-        results_df = pd.DataFrame({
-            'Date': future_dates,
-            'Predicted Sentiment': predictions,
-            'Positive Probability': lr_predictions
-        })
-        fig, ax1 = plt.subplots(figsize=(10, 6))
-        ax1.plot(daily_sentiment['date'], daily_sentiment['combined_score'], 'g-', label='Historical Sentiment')
-        ax1.plot(results_df['Date'], results_df['Predicted Sentiment'], 'b-', label='Predicted Sentiment')
-        ax1.set_xlabel('Date')
-        ax1.set_ylabel('Sentiment Score', color='b')
-        ax2 = ax1.twinx()
-        ax2.plot(results_df['Date'], results_df['Positive Probability'], 'r-', label='Positive Probability')
-        ax2.set_ylabel('Positive Probability', color='r')
-        fig.legend(loc='upper left', bbox_to_anchor=(0.1, 0.9))
-        plt.title(f"{platform} Sentiment Forecast for '{keyword}'")
-        st.pyplot(fig)
-
-# Sidebar Instructions
-st.sidebar.write("1. Ensure 'sentiment140.csv' is in the folder.")
-st.sidebar.write("2. Enter a keyword to analyze live Reddit/YouTube and Twitter dataset.")
-st.sidebar.write("3. Run: `streamlit run sentiment_app.py`")
+            continue
+
+        scaler = MinMaxScaler()
+        daily_sentiment['scaled_score'] = scaler.fit_transform(daily_sentiment[['combined_score']])
+
+        def create_sequences(data, seq_length):
+            X, y = [], []
+            for i in range(len(data) - seq_length):
+                X.append(data[i:i + seq_length])
+                y.append(data[i + seq_length])
+            return np.array(X), np.array(y)
+
+        seq_length = 7
+        X, y = create_sequences(daily_sentiment['scaled_score'].values, seq_length)
+        X = X.reshape((X.shape[0], X.shape[1], 1))
+
+        model = Sequential([
+            LSTM(50, return_sequences=True, input_shape=(seq_length, 1)),
+            Dropout(0.2),
+            LSTM(25),
+            Dropout(0.2),
+            Dense(1, activation='sigmoid')
+        ])
+        model.compile(optimizer='adam', loss='mse')
+        model.fit(X, y, epochs=10, batch_size=32, validation_split=0.2, verbose=0)
+
+        last_seq = daily_sentiment['scaled_score'][-seq_length:].values.reshape((1, seq_length, 1))
+        predictions = []
+        for _ in range(30):
+            pred = model.predict(last_seq, verbose=0)
+            predictions.append(pred[0][0])
+            last_seq = np.roll(last_seq, -1)
+            last_seq[0, -1, 0] = pred[0][0]
+
+        predictions = scaler.inverse_transform(np.array(predictions).reshape(-1, 1)).flatten()
+
+        X_lr = np.column_stack((daily_sentiment['scaled_score'], daily_sentiment['tweet_count']))
+        y_lr = (daily_sentiment['combined_score'] > 0.5).astype(int)
+        lr_model = LogisticRegression().fit(X_lr, y_lr)
+
+        future_dates = [daily_sentiment['date'].iloc[-1] + timedelta(days=i) for i in range(1, 31)]
+        X_future = np.column_stack((predictions, [daily_sentiment['tweet_count'].mean()] * 30))
+        lr_predictions = lr_model.predict_proba(X_future)[:, 1]
+
+        st.subheader(f"{platform} 30-Day Prediction")
+        fig, ax = plt.subplots()
+        ax.plot(daily_sentiment['date'], daily_sentiment['combined_score'], 'g-', label='Historical')
+        ax.plot(future_dates, predictions, 'b--', label='Predicted')
+        ax.legend()
+        st.pyplot(fig)
+
+        st.subheader(f"{platform} Logistic Regression SHAP")
+        explainer = shap.Explainer(lr_model, X_lr)
+        shap_values = explainer(X_lr)
+        shap.plots.beeswarm(shap_values, show=False)
+        st.pyplot(plt.gcf())
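A side note on the credential block this commit leaves in place: the Reddit and YouTube keys are committed in plain text. A minimal sketch of reading them from the environment instead; the environment-variable names below are assumptions, not defined anywhere in the app:

import os
import praw
from googleapiclient.discovery import build

# Sketch only: assumed environment-variable names, set outside the repo.
REDDIT_CLIENT_ID = os.environ["REDDIT_CLIENT_ID"]
REDDIT_CLIENT_SECRET = os.environ["REDDIT_CLIENT_SECRET"]
REDDIT_USER_AGENT = os.environ.get("REDDIT_USER_AGENT", "SoundaryaR_Bot/1.0")
YOUTUBE_API_KEY = os.environ["YOUTUBE_API_KEY"]

reddit = praw.Reddit(client_id=REDDIT_CLIENT_ID,
                     client_secret=REDDIT_CLIENT_SECRET,
                     user_agent=REDDIT_USER_AGENT)
youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)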
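On the scoring itself: combined_sentiment averages the BERT confidence with the absolute value of the VADER compound, so a strongly negative VADER score still raises the combined score. For example, bert_score 0.9 with a VADER compound of -0.6 gives (0.9 + 0.6) / 2 = 0.75, which is labeled 1 (positive) even though VADER leaned negative; worth keeping in mind when reading the per-platform results tables.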
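The new create_sequences helper turns the daily scaled scores into supervised pairs: each 7-day window is matched with the following day's score. A self-contained check of the shapes it produces, on toy data rather than app data:

import numpy as np

def create_sequences(data, seq_length):
    # Slide a window of length seq_length over the series;
    # the element right after each window is its target.
    X, y = [], []
    for i in range(len(data) - seq_length):
        X.append(data[i:i + seq_length])
        y.append(data[i + seq_length])
    return np.array(X), np.array(y)

scores = np.linspace(0.0, 1.0, 10)   # 10 days of toy scaled scores
X, y = create_sequences(scores, 7)
print(X.shape, y.shape)              # (3, 7) (3,) -> three 7-day windows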
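In the 30-day loop, np.roll(last_seq, -1) rolls the flattened buffer; that happens to equal a roll along the time axis only because the batch and feature dimensions of the (1, seq_length, 1) array are both 1. An explicit equivalent of the same window update (sketch):

# Explicit form of the sliding-window update inside the loop:
last_seq = np.roll(last_seq, -1, axis=1)   # shift the 7-day window left
last_seq[0, -1, 0] = pred[0][0]            # append the newest prediction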
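One thing to verify in the new forecasting block: lr_model is fit with scaled scores as its first feature, but X_future stacks the inverse-transformed predictions, so inference runs on a different scale than training. A hedged sketch of keeping both on the scaled range, reusing the same scaler and variables defined above:

# Sketch: feed the logistic regression the same scaled range it was trained on.
scaled_preds = scaler.transform(np.array(predictions).reshape(-1, 1)).flatten()
X_future = np.column_stack((scaled_preds, [daily_sentiment['tweet_count'].mean()] * 30))
lr_predictions = lr_model.predict_proba(X_future)[:, 1]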