Arjon07CSE commited on
Commit
146a6c0
·
verified ·
1 Parent(s): b9c5023

updated the name

Browse files
Files changed (1) hide show
  1. app.py +1643 -0
app.py CHANGED
@@ -0,0 +1,1643 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ==============================================================================
2
+ # SOCIAL PERCEPTION ANALYZER - FINAL COMPLETE APPLICATION
3
+ # Version: 4.1 (Fully Refactored, Production-Ready)
4
+ # ==============================================================================
5
+ # --- IMPORTS ---
6
+ import re
7
+ from GoogleNews import GoogleNews
8
+ from requests.exceptions import HTTPError
9
+ import pandas as pd
10
+ import logging
11
+ import time
12
+ from datetime import datetime, timezone
13
+ from logging.handlers import RotatingFileHandler
14
+ import gradio as gr
15
+ import matplotlib.pyplot as plt
16
+ from matplotlib.font_manager import FontProperties, fontManager
17
+ import seaborn as sns
18
+ from wordcloud import WordCloud
19
+ import dateparser
20
+ import numpy as np
21
+ import os
22
+
23
# ==============================================================================
# SETUP PRODUCTION-GRADE LOGGING & CONFIGURATION
# ==============================================================================
# Rotating file log: at most 5 MB per file with 2 rotated backups, so the log
# cannot grow without bound on the hosting box.
log_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
log_handler = RotatingFileHandler('app.log', maxBytes=5*1024*1024, backupCount=2)
log_handler.setFormatter(log_formatter)
# Root logger at INFO so every module's messages land in app.log.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Guard against attaching a duplicate handler when the module is re-imported
# (e.g. Gradio hot-reload re-executes the file top level).
if not logger.handlers:
    logger.addHandler(log_handler)
logger.info("Application starting up.")
34
+
35
# --- APPLICATION CONFIGURATION ---
# User-facing strings shown in the Gradio UI.
APP_TITLE = "Prohori (প্রহরী)"
APP_TAGLINE = "Analyze GoogleNews & YouTube video trends, engagement, and comment activity for your search topics."
APP_FOOTER = "Developed by Arjon"

# --- FONT CONFIGURATION ---
# Bundled Bengali font file expected next to this script.
FONT_PATH = 'NotoSansBengali-Regular.ttf'
# NOTE(review): starts out as the raw path string; setup_bangla_font() below
# replaces it with a FontProperties object on success (or None on failure).
# Downstream code calls BANGLA_FONT.get_name(), which only exists on the
# FontProperties form — verify the failure paths.
BANGLA_FONT = FONT_PATH
44
def setup_bangla_font():
    """Register the bundled Bengali font with matplotlib.

    On success, BANGLA_FONT becomes a FontProperties object and matplotlib's
    default font family is switched to it. On any failure, BANGLA_FONT is set
    to None and matplotlib falls back to a generic sans-serif family.

    Returns:
        bool: True when the font was registered successfully, False otherwise.
    """
    global BANGLA_FONT
    # Strictly enforce NotoSansBengali-Regular.ttf for all Bengali text
    if os.path.exists(FONT_PATH):
        try:
            fontManager.addfont(FONT_PATH)
            BANGLA_FONT = FontProperties(fname=FONT_PATH)
            plt.rcParams['font.family'] = BANGLA_FONT.get_name()
            plt.rcParams['axes.unicode_minus'] = False
            logger.info(f"Successfully loaded '{FONT_PATH}' for Bengali text.")
            return True
        except Exception as e:
            logger.error(f"Error loading Bengali font: {e}")
            # BUGFIX: previously BANGLA_FONT silently kept the raw path string
            # here, which breaks later BANGLA_FONT.get_name() calls.
            BANGLA_FONT = None
            return False
    else:
        logger.error(f"Font file {FONT_PATH} not found. Bengali text will not render correctly.")
        BANGLA_FONT = None
        # BUGFIX: 'NotoSansBengali-Regular' is not a registered family when the
        # file is missing, so matplotlib would emit findfont warnings on every
        # plot; fall back to a real generic family instead.
        plt.rcParams['font.family'] = 'sans-serif'
        return False
64
+
65
# Initialize font system at import time so every later figure picks up the
# Bengali font; the boolean records whether the font file was usable.
font_loaded = setup_bangla_font()
67
+
68
+ # ==============================================================================
69
+ # CORE HELPER FUNCTIONS
70
+ # ==============================================================================
71
def clean_bengali_text(text):
    """Strip everything outside the Bengali Unicode block (U+0980-U+09FF),
    keeping spaces and underscores (used for joined multi-word phrases),
    then collapse runs of whitespace to single spaces."""
    bengali_only = re.sub(r'[^\u0980-\u09FF_\s]', '', str(text))
    return re.sub(r'\s+', ' ', bengali_only).strip()
76
+
77
# Comprehensive stopword list for Bengali text analysis.
# BUGFIX: one entry was mojibake ('য\uFFFD\uFFFD\uFFFDিও'); restored to the
# intended stopword 'যদিও' ("although"). Duplicates are harmless because the
# list is converted to a set before use.
BANGLA_STOP_WORDS = [
    'অতএব', 'অথচ', 'অথবা', 'অনুযায়ী', 'অনেক', 'অনেকে', 'অনেকেই', 'অন্তত', 'অন্য', 'অবধি', 'অবশ্য',
    'অভিপ্রায়', 'একে', 'একই', 'একেবারে', 'একটি', 'একবার', 'এখন', 'এখনও', 'এখানে', 'এখানেই', 'এটি',
    'এতটাই', 'এতদূর', 'এতটুকু', 'এক', 'এবং', 'এবার', 'এমন', 'এমনভাবে', 'এর', 'এরা', 'এঁরা', 'এঁদের',
    'এই', 'এইভাবে', 'ও', 'ওঁরা', 'ওঁর', 'ওঁদের', 'ওকে', 'ওখানে', 'ওদের', 'ওর', 'কাছ', 'কাছে', 'কাজ',
    'কারণ', 'কিছু', 'কিছুই', 'কিন্তু', 'কিভাবে', 'কেন', 'কোন', 'কোনও', 'কোনো', 'ক্ষেত্রে', 'খুব',
    'গুলি', 'গিয়ে', 'চায়', 'ছাড়া', 'জন্য', 'জানা', 'ঠিক', 'তিনি', 'তিন', 'তিনিও', 'তাকে', 'তাঁকে',
    'তার', 'তাঁর', 'তারা', 'তাঁরা', 'তাদের', 'তাঁদের', 'তাহলে', 'থাকলেও', 'থেকে', 'মধ্যেই', 'মধ্যে',
    'দ্বারা', 'নয়', 'না', 'নিজের', 'নিজে', 'নিয়ে', 'পারেন', 'পারা', 'পারে', 'পরে', 'পর্যন্ত', 'পুনরায়',
    'ফলে', 'বজায়', 'বা', 'বাদে', 'বার', 'বিশেষ', 'বিভিন্ন', 'ব্যবহার', 'ব্যাপারে', 'ভাবে', 'ভাবেই', 'মাধ্যমে',
    'মতো', 'মতোই', 'যখন', 'যদি', 'যদিও', 'যা', 'যাকে', 'যাওয়া', 'যায়', 'যে', 'যেখানে', 'যেতে', 'যেমন',
    'যেহেতু', 'রহিছে', 'শিক্ষা', 'শুধু', 'সঙ্গে', 'সব', 'সমস্ত', 'সম্প্রতি', 'সহ', 'সাধারণ', 'সামনে', 'হতে',
    'হতেই', 'হবে', 'হয়', 'হয়তো', 'হয়', 'হচ্ছে', 'হত', 'হলে', 'হলেও', 'হয়নি', 'হাজার', 'হোওয়া', 'আরও', 'আমরা',
    'আমার', 'আমি', 'আর', 'আগে', 'আগেই', 'আছে', 'আজ', 'তাকে', 'তাতে', 'তাদের', 'তাহার', 'তাহাতে', 'তাহারই',
    'তথা', 'তথাপি', 'সে', 'সেই', 'সেখান', 'সেখানে', 'থেকে', 'নাকি', 'নাগাদ', 'দু', 'দুটি', 'সুতরাং',
    'সম্পর্কে', 'সঙ্গেও', 'সর্বাধিক', 'সর্বদা', 'সহ', 'হৈতে', 'হইবে', 'হইয়া', 'হৈল', 'জানিয়েছেন', 'প্রতিবেদক'
]
95
+
96
# Set form for O(1) membership checks during word filtering.
COMBINED_STOPWORDS = set(BANGLA_STOP_WORDS)

# Multi-word names joined with "_" so downstream tokenization and the
# WordCloud treat each name as a single token instead of splitting it.
PHRASES_TO_JOIN = {
    "তারেক রহমান": "তারেক_রহমান",
    "খালেদা জিয়া": "খালেদা_জিয়া",
    "বিএনপি জিন্দাবাদ": "বিএনপি_জিন্দাবাদ"

}
104
+
105
def get_dynamic_time_agg(start_date, end_date):
    """Pick a pandas resample code and human label suited to the span
    between two timestamps: hourly up to 2 days, daily up to 90 days,
    weekly up to 2 years, monthly beyond that."""
    if not (isinstance(start_date, pd.Timestamp) and isinstance(end_date, pd.Timestamp)):
        return 'D', 'Daily'  # graceful fallback for non-timestamp input

    span_days = (end_date - start_date).days
    # Ordered (upper-bound, code, label) tiers; first match wins.
    for limit, code, label in ((2, 'H', 'Hourly'), (90, 'D', 'Daily'), (730, 'W', 'Weekly')):
        if span_days <= limit:
            return code, label
    return 'M', 'Monthly'
118
+
119
def kpi_badge_html(value, label, threshold_high=None, threshold_low=None):
    """
    Render a KPI as a color-coded inline HTML badge.

    Green when the value reaches threshold_high, red at or below
    threshold_low, yellow when both thresholds are given and the value sits
    between them, neutral grey otherwise (including non-numeric values).
    """
    # Coerce to a number for the color decision; keep the original for display.
    try:
        if isinstance(value, str) and ',' in value:
            numeric = float(value.replace(',', ''))
        else:
            numeric = float(value)
    except (TypeError, ValueError, AttributeError):
        numeric = value

    is_number = isinstance(numeric, (int, float))
    color = '#e0e0e0'  # neutral default
    if threshold_high is not None and is_number and numeric >= threshold_high:
        color = '#4caf50'  # green
    elif threshold_low is not None and is_number and numeric <= threshold_low:
        color = '#f44336'  # red
    elif threshold_high is not None and threshold_low is not None and is_number:
        color = '#ffeb3b'  # yellow

    # Thousands separators for raw numbers; everything else displays as-is.
    display = f"{value:,.0f}" if isinstance(value, (int, float)) else str(value)

    return f"<div style='display:inline-block;padding:8px 16px;border-radius:8px;background:{color};color:#222;font-weight:bold;margin:2px;'>{label}: {display}</div>"
148
+
149
def set_plot_style():
    """Apply the app-wide matplotlib style: whitegrid theme, display/save DPI,
    default figure size, and the Bengali font when it loaded successfully."""
    plt.style.use('seaborn-v0_8-whitegrid')
    plt.rcParams['figure.dpi'] = 100
    plt.rcParams['savefig.dpi'] = 300
    plt.rcParams['figure.figsize'] = (10, 6)
    # BUGFIX: BANGLA_FONT can be the raw path string (font-load exception path)
    # or None; only FontProperties objects expose get_name(). The previous
    # unconditional BANGLA_FONT.get_name() raised AttributeError in that case.
    if BANGLA_FONT is not None and hasattr(BANGLA_FONT, 'get_name') and BANGLA_FONT.get_name():
        plt.rcParams['font.family'] = BANGLA_FONT.get_name()
    else:
        plt.rcParams['font.family'] = 'sans-serif'
    plt.rcParams['axes.unicode_minus'] = False  # fix for minus sign rendering
161
+
162
def cleanup_figures(*figures):
    """Close matplotlib figures to release their memory; None entries are
    skipped. Cleanup must never raise, so close errors are suppressed.

    Args:
        *figures: matplotlib Figure objects (or None placeholders).
    """
    for fig in figures:
        if fig is not None:
            try:
                plt.close(fig)
            # BUGFIX: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; narrow to ordinary exceptions only.
            except Exception:
                pass  # figure may already be closed/invalid — best-effort cleanup
170
+
171
+ # ==============================================================================
172
+ # NEWS SCRAPER BACKEND
173
+ # ==============================================================================
174
def run_news_scraper_pipeline(search_keywords, sites, start_date_str, end_date_str, interval, max_pages, filter_keys, progress=gr.Progress()):
    """Scrape Google News in date-windowed batches with robust error handling.

    Args:
        search_keywords: Phrase to search for (quoted in the final query).
        sites: Optional comma-separated domain list to restrict results to.
        start_date_str / end_date_str: Date bounds in any dateparser-readable form.
        interval: Window size in days for each batched query.
        max_pages: Maximum result pages to fetch per window.
        filter_keys: Optional AND/OR/NOT post-filter applied to title+description.
        progress: Gradio progress callback.

    Returns:
        (full_df, display_df): the full scraped DataFrame and a column-trimmed,
        date-sorted view for display. Both empty when nothing was found.

    Raises:
        gr.Error: on missing required inputs or unparseable dates.
    """
    # Input validation and sanitization
    search_keywords = str(search_keywords).strip() if search_keywords else ""
    sites = str(sites).strip() if sites else ""
    start_date_str = str(start_date_str).strip() if start_date_str else ""
    end_date_str = str(end_date_str).strip() if end_date_str else ""
    filter_keys = str(filter_keys).strip() if filter_keys else ""

    if not all([search_keywords, start_date_str, end_date_str]):
        raise gr.Error("Search Keywords, Start Date, and End Date are required.")

    start_dt = dateparser.parse(start_date_str)
    end_dt = dateparser.parse(end_date_str)

    if not all([start_dt, end_dt]):
        raise gr.Error("Invalid date format. Please use a recognizable format like YYYY-MM-DD or '2 weeks ago'.")

    # Ensure start date is before end date
    if start_dt > end_dt:
        start_dt, end_dt = end_dt, start_dt
        gr.Warning("Start date was after end date. Dates have been swapped.")

    all_articles, current_dt = [], start_dt
    # BUGFIX: the progress fraction previously divided by (end_dt - start_dt).days,
    # which is 0 when both dates fall on the same day -> ZeroDivisionError.
    total_span_days = max((end_dt - start_dt).days, 1)

    while current_dt <= end_dt:
        try:
            interval_end_dt = min(current_dt + pd.Timedelta(days=interval - 1), end_dt)
            start_str, end_str = current_dt.strftime('%Y-%m-%d'), interval_end_dt.strftime('%Y-%m-%d')

            progress((current_dt - start_dt).days / total_span_days,
                     desc=f"Fetching news from {start_str} to {end_str}")

            # Restrict to requested domains via site: operators, date-bound the query.
            site_query = f"({' OR '.join(['site:' + s.strip() for s in sites.split(',') if s.strip()])})" if sites else ""
            final_query = f'"{search_keywords}" {site_query} after:{start_str} before:{end_str}'

            googlenews = GoogleNews(lang='bn', region='BD', period='1d')
            googlenews.search(final_query)

            for page in range(1, max_pages + 1):
                try:
                    results = googlenews.results()
                    if not results:
                        break
                    all_articles.extend(results)

                    if page < max_pages:
                        googlenews.getpage(page + 1)
                        time.sleep(0.3)  # polite delay between page fetches
                except HTTPError as e:
                    # NOTE(review): assumes e.response is populated; requests can
                    # raise HTTPError with response=None in rare cases — verify.
                    if e.response.status_code == 429:
                        wait_time = 3
                        gr.Warning(f"Rate limited by Google News. Pausing for {wait_time} seconds.")
                        time.sleep(wait_time)
                    else:
                        logger.error(f"HTTP Error fetching news: {e}")
                        break
                except Exception as e:
                    logger.error(f"An error occurred fetching news: {e}")
                    break

            current_dt += pd.Timedelta(days=interval)
        except Exception as e:
            logger.error(f"Error in news scraping loop: {e}")
            break

    if not all_articles:
        return pd.DataFrame(), pd.DataFrame()

    # Create DataFrame and de-duplicate on the article URL
    df = pd.DataFrame(all_articles).drop_duplicates(subset=['link'])

    # Parse dates safely (Google News returns Bengali relative date strings)
    df['published_date'] = df['date'].apply(lambda x: dateparser.parse(x, languages=['bn']) if pd.notna(x) else None)

    # Drop rows with missing critical data
    df = df.dropna(subset=['published_date', 'title'])

    # Apply advanced filtering if filter keywords are provided
    if filter_keys and filter_keys.strip():
        def match_complex_query(text, query):
            """Advanced query parser supporting AND, OR, NOT logic.

            NOTE(review): the NOT branch emits a lookahead fragment that is
            frequently not valid regex; when re.search raises, the fallback
            below degrades to a simple any-token substring match.
            """
            if not text or not query:
                return False

            text = str(text).lower()
            query = query.lower()

            # Simple tokenization that preserves phrases in quotes
            tokens = re.findall(r'"[^"]+"|\S+', query)

            # Build a regex pattern from the tokens
            patterns = []
            for token in tokens:
                if token == 'and':
                    continue  # implicit adjacency handles AND
                elif token == 'or':
                    patterns.append('|')
                elif token == 'not':
                    patterns.append('(?=^(?!.*')
                else:
                    # Clean token and convert to regex pattern
                    clean_token = token.strip('"')
                    if clean_token.startswith('"') and clean_token.endswith('"'):
                        clean_token = clean_token[1:-1]
                    patterns.append(re.escape(clean_token))

            # Join patterns and handle negation
            final_pattern = ''.join(patterns)
            if '(?=' in final_pattern:
                final_pattern += '))'

            try:
                return bool(re.search(final_pattern, text))
            # BUGFIX: was a bare `except:`; keep the best-effort fallback but
            # stop swallowing SystemExit/KeyboardInterrupt.
            except Exception:
                return any(token in text for token in tokens if token not in ['and', 'or', 'not'])

        # Apply filtering to title and description
        mask = df.apply(lambda row: match_complex_query(
            str(row['title']) + ' ' + str(row.get('desc', '')),
            filter_keys
        ), axis=1)

        df = df[mask]

    # Return both full dataset and filtered display dataset.
    # Some sources use 'desc', some use 'description'. Unify to 'description'.
    if 'desc' in df.columns and 'description' not in df.columns:
        df['description'] = df['desc']
    return df, df[['published_date', 'title', 'media', 'description', 'link']].sort_values(by='published_date', ascending=False)
307
+
308
+ # ==============================================================================
309
+ # YOUTUBE ANALYZER BACKEND
310
+ # ==============================================================================
311
def run_youtube_analysis_pipeline(api_key, query, max_videos_for_stats, num_videos_for_comments, max_comments_per_video, published_after, progress=gr.Progress()):
    """Run the full YouTube analysis: broad video search, per-video statistics,
    and a comment deep-dive on the most-viewed videos.

    Args:
        api_key: YouTube Data API v3 key; falls back to the YOUTUBE_API_KEY env var.
        query: Search keywords (required).
        max_videos_for_stats: Upper bound on videos to scan (50 per API page).
        num_videos_for_comments: How many top videos get a comment deep-dive.
        max_comments_per_video: Comment cap per video (API max 100 per request).
        published_after: Optional lower date bound in any dateparser-readable form.
        progress: Gradio progress callback.

    Returns:
        (videos_df, comments_df, summary_html); empty frames and "" when no results.

    Raises:
        gr.Error: on missing key/query, missing client library, or quota exhaustion.
    """
    # SECURITY FIX: a real API key was previously hard-coded here and the
    # caller-supplied `api_key` parameter was silently discarded. Never commit
    # secrets — honor the parameter and fall back to the environment.
    api_key = api_key or os.getenv("YOUTUBE_API_KEY", "")
    if not api_key:
        raise gr.Error("A YouTube API key is required. Provide one or set YOUTUBE_API_KEY.")

    if not query:
        raise gr.Error("Search Keywords are required.")

    try:
        from googleapiclient.discovery import build
        from googleapiclient.errors import HttpError
        youtube = build('youtube', 'v3', developerKey=api_key)
    except ImportError:
        logger.error("Required YouTube API libraries not installed")
        raise gr.Error("YouTube analysis requires additional libraries. Please install google-api-python-client.")
    except HttpError as e:
        raise gr.Error(f"Failed to initialize YouTube service. Check API Key. Error: {e}")
    except Exception as e:
        raise gr.Error(f"An unexpected error occurred during API initialization: {e}")

    # --- Phase 1: broad search for candidate video IDs ---
    progress(0.1, desc="Performing broad scan for videos...")
    all_video_ids, next_page_token, total_results_estimate = [], None, 0
    PAGES_TO_FETCH = min(15, (max_videos_for_stats // 50) + 1)  # API caps at 50 results/page

    search_params = {
        'q': query,
        'part': 'id',
        'maxResults': 50,
        'type': 'video',
        'order': 'relevance'
    }

    if published_after:
        parsed_date = dateparser.parse(published_after)
        if parsed_date:
            # The API requires an RFC 3339 UTC timestamp
            search_params['publishedAfter'] = parsed_date.replace(tzinfo=timezone.utc).isoformat()
        else:
            gr.Warning(f"Could not parse date: '{published_after}'. Ignoring filter.")

    for page in range(PAGES_TO_FETCH):
        try:
            if next_page_token:
                search_params['pageToken'] = next_page_token

            response = youtube.search().list(**search_params).execute()

            if page == 0:
                total_results_estimate = response.get('pageInfo', {}).get('totalResults', 0)

            # Extract valid video IDs (non-video hits lack 'videoId')
            valid_ids = []
            for item in response.get('items', []):
                if 'id' in item and 'videoId' in item['id']:
                    valid_ids.append(item['id']['videoId'])

            all_video_ids.extend(valid_ids)

            next_page_token = response.get('nextPageToken')
            progress(0.1 + (0.3 * (page / PAGES_TO_FETCH)),
                     desc=f"Broad scan: Found {len(all_video_ids)} videos...")

            if not next_page_token:
                break
        except HttpError as e:
            if "quotaExceeded" in str(e):
                raise gr.Error("CRITICAL: YouTube API daily quota exceeded. Try again tomorrow.")
            logger.error(f"HTTP error during video search: {e}")
            break
        except Exception as e:
            logger.error(f"Unexpected error during YouTube search: {e}")
            break

    if not all_video_ids:
        return pd.DataFrame(), pd.DataFrame(), ""

    # --- Phase 2: fetch statistics for every found video ---
    progress(0.4, desc=f"Fetching details for {len(all_video_ids)} videos...")

    def _fetch_video_details(youtube_service, video_ids: list):
        """Fetch snippet + statistics for video IDs in batches of 50 (API limit)."""
        all_videos_data = []
        try:
            for i in range(0, len(video_ids), 50):
                id_batch = video_ids[i:i+50]
                video_request = youtube_service.videos().list(
                    part="snippet,statistics",
                    id=",".join(id_batch)
                )
                video_response = video_request.execute()

                for item in video_response.get('items', []):
                    stats = item.get('statistics', {})
                    all_videos_data.append({
                        'video_id': item['id'],
                        'video_title': item['snippet']['title'],
                        'channel': item['snippet']['channelTitle'],
                        'published_date': item['snippet']['publishedAt'],
                        'view_count': int(stats.get('viewCount', 0)),
                        'like_count': int(stats.get('likeCount', 0)),
                        'comment_count': int(stats.get('commentCount', 0))
                    })
        except Exception as e:
            logger.error(f"Could not fetch video details: {e}")

        return all_videos_data

    videos_df_full_scan = pd.DataFrame(_fetch_video_details(youtube, all_video_ids))

    if videos_df_full_scan.empty:
        return pd.DataFrame(), pd.DataFrame(), ""

    # Process and clean video data
    videos_df_full_scan['published_date'] = pd.to_datetime(videos_df_full_scan['published_date'])

    # Engagement = (likes + comments) / views, guarding the 0-view divide-by-zero
    videos_df_full_scan['engagement_rate'] = (
        (videos_df_full_scan['like_count'] + videos_df_full_scan['comment_count']) /
        videos_df_full_scan['view_count'].replace(0, 1)
    ).fillna(0)

    videos_df_full_scan = videos_df_full_scan.sort_values(
        by='view_count',
        ascending=False
    ).reset_index(drop=True)

    # --- Phase 3: comment deep-dive on the most-viewed videos ---
    videos_to_scrape_df = videos_df_full_scan.head(int(num_videos_for_comments))
    all_comments = []

    def _scrape_single_video_comments(youtube_service, video_id, max_comments):
        """Scrape top-level comments for one video; returns [] when comments
        are disabled or the request fails."""
        comments_list = []
        try:
            request = youtube_service.commentThreads().list(
                part="snippet",
                videoId=video_id,
                maxResults=min(max_comments, 100),
                order='relevance',
                textFormat="plainText"
            )
            response = request.execute()

            for item in response.get('items', []):
                snippet = item['snippet']['topLevelComment']['snippet']
                comments_list.append({
                    'author': snippet['authorDisplayName'],
                    'published_date_comment': snippet['publishedAt'],
                    'comment_text': snippet['textDisplay'],
                    'likes': snippet['likeCount'],
                    'replies': item['snippet']['totalReplyCount']
                })
        except Exception as e:
            # Comments can be disabled per-video; log and carry on
            logger.warning(f"Could not retrieve comments for video {video_id}: {e}")

        return comments_list

    # Positional index is safe here: the frame was reset_index(drop=True) above.
    for index, row in videos_to_scrape_df.iterrows():
        progress(0.7 + (0.3 * (index / len(videos_to_scrape_df))),
                 desc=f"Deep dive: Scraping comments from video {index+1}/{len(videos_to_scrape_df)}...")

        comments_for_video = _scrape_single_video_comments(
            youtube,
            row['video_id'],
            max_comments_per_video
        )

        if comments_for_video:
            # Tag every comment with its source video for later grouping
            for comment in comments_for_video:
                comment.update({
                    'video_id': row['video_id'],
                    'video_title': row['video_title']
                })
            all_comments.extend(comments_for_video)

    comments_df = pd.DataFrame(all_comments)
    if not comments_df.empty:
        comments_df['published_date_comment'] = pd.to_datetime(comments_df['published_date_comment'])

    logger.info(f"YouTube analysis complete. Est. total videos: {total_results_estimate}. "
                f"Scanned: {len(videos_df_full_scan)}. Comments: {len(comments_df)}.")

    # Create summary HTML
    summary_html = f"""
    <div style='background:#f5f5f5;padding:16px;border-radius:12px;margin-bottom:12px;box-shadow:0 2px 8px #eee;'>
        <h3 style='margin:0 0 8px 0;'>YouTube Analytics Summary</h3>
        <ul style='margin:0;padding-left:18px;'>
            <li><b>Total Videos:</b> {len(videos_df_full_scan):,}</li>
            <li><b>Total Comments:</b> {len(comments_df):,}</li>
            <li><b>Total Views:</b> {videos_df_full_scan['view_count'].sum():,}</li>
        </ul>
    </div>
    """

    return videos_df_full_scan, comments_df, summary_html
505
+
506
+ # ==============================================================================
507
+ # ADVANCED ANALYTICS MODULE
508
+ # ==============================================================================
509
def generate_scraper_dashboard(df: pd.DataFrame):
    """Generate comprehensive dashboard from news scraper results.

    Builds three KPI badges, a resampled time-series plot, a top-media bar
    chart, and a Bengali headline word cloud.

    Args:
        df: Scraper output; expects 'published_date', 'title' and 'media'
            columns. An empty frame yields blank components.

    Returns:
        dict keyed by dashboard slot name; values are gradio components,
        matplotlib figures, or None when a plot could not be generated.
    """
    if df.empty:
        # Return empty dashboard components
        return {
            "kpi_total_articles": gr.HTML(""),
            "kpi_unique_media": gr.HTML(""),
            "kpi_date_range": gr.HTML(""),
            "dashboard_timeline_plot": None,
            "dashboard_media_plot": None,
            "dashboard_wordcloud_plot": None
        }

    set_plot_style()

    # Calculate KPIs
    total_articles, unique_media = len(df), df['media'].nunique()
    start_date, end_date = pd.to_datetime(df['published_date']).min(), pd.to_datetime(df['published_date']).max()
    date_range_str = f"{start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')}"

    # Color-coded KPI badges
    kpi_total_articles_html = kpi_badge_html(
        total_articles, 'Total Articles', threshold_high=100, threshold_low=10
    )
    kpi_unique_media_html = kpi_badge_html(
        unique_media, 'Unique Media', threshold_high=10, threshold_low=2
    )
    kpi_date_range_html = kpi_badge_html(
        date_range_str, 'Date Range', threshold_high=None, threshold_low=None
    )

    # Time series visualization - FIXED GRADIO API USAGE
    # Resolution (hourly/daily/weekly/monthly) adapts to the covered span.
    agg_code, agg_name = get_dynamic_time_agg(start_date, end_date)
    timeline_df = df.set_index(pd.to_datetime(df['published_date'])).resample(agg_code).size().reset_index(name='count')
    timeline_df.rename(columns={'published_date': 'date'}, inplace=True)
    timeline_plot = gr.LinePlot(
        value=timeline_df,
        x='date',
        y='count',
        title=f'{agg_name} News Volume',
        tooltip=['date', 'count'],
        x_title="Date",
        y_title="Number of Articles"
    )

    # Media source analysis: horizontal bars, smallest at the bottom
    media_counts = df['media'].dropna().value_counts().nlargest(15).sort_values()
    fig_media = None
    if not media_counts.empty:
        fig_media, ax = plt.subplots(figsize=(8, 6))
        media_counts.plot(kind='barh', ax=ax, color='skyblue')
        # Every text element is forced onto the Bengali font so media names render
        ax.set_title("Top 15 Media Sources", fontproperties=BANGLA_FONT, fontsize=18)
        ax.set_xlabel("Article Count", fontproperties=BANGLA_FONT, fontsize=14)
        ax.set_ylabel("মিডিয়া", fontproperties=BANGLA_FONT, fontsize=14)
        yticks = np.arange(len(media_counts.index))
        ax.set_yticks(yticks)
        ax.set_yticklabels(media_counts.index, fontproperties=BANGLA_FONT, fontsize=14)
        for label in ax.get_xticklabels():
            label.set_fontproperties(BANGLA_FONT)
            label.set_fontsize(12)
        for label in ax.get_yticklabels():
            label.set_fontproperties(BANGLA_FONT)
            label.set_fontsize(14)
        legend = ax.get_legend()
        if legend:
            for text in legend.get_texts():
                text.set_fontproperties(BANGLA_FONT)
        plt.tight_layout()

    # Word cloud generation
    fig_wc = None
    try:
        # Combine all titles and clean text
        text = " ".join(title for title in df['title'].astype(str))
        text = clean_bengali_text(text)

        # Join special phrases so multi-word names count as one token
        for phrase, joined in PHRASES_TO_JOIN.items():
            text = text.replace(phrase, joined)

        # Extract Bengali tokens (2+ chars) and filter stopwords / Latin leftovers
        words = re.findall(r'[\u0980-\u09FF_]{2,}', text)
        words = [w for w in words if w not in COMBINED_STOPWORDS]
        words = [w for w in words if len(w) > 1]
        words = [w for w in words if not re.search(r'[a-zA-Z]', w)]

        # Filter by frequency: drop singletons and the 3 most common words
        # (the latter are usually the search keywords themselves)
        from collections import Counter
        word_freq = Counter(words)
        min_freq = 2
        most_common = set([w for w, _ in word_freq.most_common(3)])
        filtered_words = [w for w in words if word_freq[w] >= min_freq and w not in most_common]
        wc_text = " ".join(filtered_words)

        # Generate word cloud (regexp keeps '_'-joined phrases intact)
        if wc_text.strip():
            wc = WordCloud(
                font_path=FONT_PATH,
                width=1600,
                height=900,
                background_color='white',
                stopwords=COMBINED_STOPWORDS,
                collocations=False,
                colormap='plasma',
                max_words=200,
                contour_width=2,
                contour_color='steelblue',
                regexp=r"[\u0980-\u09FF_]+"
            ).generate(wc_text)

            fig_wc, ax = plt.subplots(figsize=(15, 8))
            ax.imshow(wc, interpolation='bilinear')
            ax.axis("off")
            ax.set_title("Bengali Headline Word Cloud", fontproperties=BANGLA_FONT, fontsize=22)
            plt.tight_layout()
    except Exception as e:
        # Word cloud is best-effort; the dashboard still renders without it
        logger.error(f"WordCloud failed: {e}")
        gr.Warning(f"WordCloud generation failed: {str(e)}")

    return {
        "kpi_total_articles": gr.HTML(kpi_total_articles_html),
        "kpi_unique_media": gr.HTML(kpi_unique_media_html),
        "kpi_date_range": gr.HTML(kpi_date_range_html),
        "dashboard_timeline_plot": timeline_plot,
        "dashboard_media_plot": fig_media,
        "dashboard_wordcloud_plot": fig_wc
    }
636
+
637
+ def generate_youtube_dashboard(videos_df, comments_df):
638
+ """Generate comprehensive dashboard from YouTube analysis results."""
639
+ # Initialize all dashboard components FIRST
640
+ dashboard_components = {
641
+ "kpi_yt_videos_found": gr.HTML(""),
642
+ "kpi_yt_views_scanned": gr.HTML(""),
643
+ "kpi_yt_comments_scraped": gr.HTML(""),
644
+ "yt_channel_plot": None,
645
+ "yt_channel_dominance_plot": None,
646
+ "yt_time_series_plot": None,
647
+ "yt_top_videos_plot": None,
648
+ "yt_content_quadrant_plot": None,
649
+ "yt_engagement_plot": None,
650
+ "yt_wordcloud_plot": None,
651
+ "yt_detailed_summary": gr.HTML("")
652
+ }
653
+
654
+ # Channel dominance by view
655
+ fig_channel_dominance = None
656
+ if videos_df is not None and not videos_df.empty and 'channel' in videos_df.columns:
657
+ channel_views = videos_df.groupby('channel')['view_count'].sum().sort_values(ascending=False).head(10)
658
+ if not channel_views.empty:
659
+ fig_channel_dominance, ax = plt.subplots(figsize=(10, 6))
660
+ channel_views.plot(kind='barh', ax=ax, color='slateblue')
661
+ ax.set_title("Top 10 Dominant Channels by View Count", fontproperties=BANGLA_FONT, fontsize=18)
662
+ ax.set_xlabel("মোট ভিউ", fontproperties=BANGLA_FONT, fontsize=14)
663
+ ax.set_ylabel("চ্যানেল", fontproperties=BANGLA_FONT, fontsize=14)
664
+ yticks = np.arange(len(channel_views.index))
665
+ ax.set_yticks(yticks)
666
+ ax.set_yticklabels(channel_views.index, fontproperties=BANGLA_FONT, fontsize=14)
667
+ for label in ax.get_xticklabels():
668
+ label.set_fontproperties(BANGLA_FONT)
669
+ label.set_fontsize(12)
670
+ for label in ax.get_yticklabels():
671
+ label.set_fontproperties(BANGLA_FONT)
672
+ label.set_fontsize(14)
673
+ legend = ax.get_legend()
674
+ if legend:
675
+ for text in legend.get_texts():
676
+ text.set_fontproperties(BANGLA_FONT)
677
+ plt.tight_layout()
678
+ dashboard_components["yt_channel_dominance_plot"] = fig_channel_dominance
679
+
680
+ # Content performance quadrant
681
+ fig_quadrant = None
682
+ if videos_df is not None and not videos_df.empty:
683
+ try:
684
+ # Define quadrant boundaries
685
+ median_views = videos_df['view_count'].median()
686
+ median_engagement = videos_df['engagement_rate'].median()
687
+ fig_quadrant, ax = plt.subplots(figsize=(10, 8))
688
+ scatter = ax.scatter(
689
+ videos_df['view_count'],
690
+ videos_df['engagement_rate'],
691
+ c='darkorange', alpha=0.7
692
+ )
693
+ ax.axvline(median_views, color='blue', linestyle='--', label='Median Views')
694
+ ax.axhline(median_engagement, color='green', linestyle='--', label='Median Engagement')
695
+ ax.set_xlabel("মোট ভিউ", fontproperties=BANGLA_FONT, fontsize=14)
696
+ ax.set_ylabel("এনগেজমেন্ট রেট", fontproperties=BANGLA_FONT, fontsize=14)
697
+ ax.set_title("Content Performance Quadrant", fontproperties=BANGLA_FONT, fontsize=18)
698
+ for label in ax.get_xticklabels():
699
+ label.set_fontproperties(BANGLA_FONT)
700
+ label.set_fontsize(12)
701
+ for label in ax.get_yticklabels():
702
+ label.set_fontproperties(BANGLA_FONT)
703
+ label.set_fontsize(14)
704
+ legend = ax.get_legend()
705
+ if legend:
706
+ for text in legend.get_texts():
707
+ text.set_fontproperties(BANGLA_FONT)
708
+ plt.tight_layout()
709
+ except Exception as e:
710
+ logger.error(f"Quadrant plot failed: {e}")
711
+ dashboard_components["yt_content_quadrant_plot"] = fig_quadrant
712
+
713
+ # Detailed analysis summary from YouTube API
714
+ detailed_summary = ""
715
+ if videos_df is not None and not videos_df.empty:
716
+ top_video = videos_df.iloc[0]
717
+ detailed_summary = f"<div style='background:#e3f2fd;padding:12px;border-radius:8px;margin-bottom:8px;'>"
718
+ detailed_summary += f"<b>Top Video:</b> {top_video['video_title']}<br>"
719
+ detailed_summary += f"<b>Channel:</b> {top_video['channel']}<br>"
720
+ detailed_summary += f"<b>Views:</b> {top_video['view_count']:,}<br>"
721
+ detailed_summary += f"<b>Likes:</b> {top_video['like_count']:,}<br>"
722
+ detailed_summary += f"<b>Comments:</b> {top_video['comment_count']:,}<br>"
723
+ detailed_summary += f"<b>Published:</b> {top_video['published_date'].strftime('%Y-%m-%d')}<br>"
724
+ detailed_summary += f"<b>Engagement Rate:</b> {top_video['engagement_rate']:.2f}"
725
+ detailed_summary += "</div>"
726
+ dashboard_components["yt_detailed_summary"] = gr.HTML(detailed_summary)
727
+
728
+ # Generate KPIs if data exists
729
+ if videos_df is not None and not videos_df.empty:
730
+ dashboard_components["kpi_yt_videos_found"] = gr.HTML(
731
+ kpi_badge_html(len(videos_df), 'Videos Found', threshold_high=50, threshold_low=5)
732
+ )
733
+ dashboard_components["kpi_yt_views_scanned"] = gr.HTML(
734
+ kpi_badge_html(videos_df['view_count'].sum(), 'Views Scanned', threshold_high=100000, threshold_low=1000)
735
+ )
736
+
737
+ if comments_df is not None and not comments_df.empty:
738
+ dashboard_components["kpi_yt_comments_scraped"] = gr.HTML(
739
+ kpi_badge_html(len(comments_df), 'Comments Scraped', threshold_high=100, threshold_low=10)
740
+ )
741
+
742
+ # Channel analysis
743
+ fig_channels = None
744
+ if videos_df is not None and not videos_df.empty and 'channel' in videos_df.columns:
745
+ channel_counts = videos_df['channel'].value_counts().nlargest(15).sort_values()
746
+ if not channel_counts.empty:
747
+ fig_channels, ax = plt.subplots(figsize=(8, 6))
748
+ channel_counts.plot(kind='barh', ax=ax, color='coral')
749
+ ax.set_title("Top 15 Channels by Video Volume", fontproperties=BANGLA_FONT, fontsize=18)
750
+ ax.set_yticklabels(channel_counts.index, fontproperties=BANGLA_FONT, fontsize=14)
751
+ ax.set_xlabel("Video Count", fontproperties=BANGLA_FONT, fontsize=14)
752
+ for label in ax.get_xticklabels():
753
+ label.set_fontproperties(BANGLA_FONT)
754
+ label.set_fontsize(12)
755
+ for label in ax.get_yticklabels():
756
+ label.set_fontproperties(BANGLA_FONT)
757
+ label.set_fontsize(14)
758
+ legend = ax.get_legend()
759
+ if legend:
760
+ for text in legend.get_texts():
761
+ text.set_fontproperties(BANGLA_FONT)
762
+ plt.tight_layout()
763
+ dashboard_components["yt_channel_plot"] = fig_channels
764
+
765
+ # Word cloud from comments
766
+ fig_wc = None
767
+ if comments_df is not None and not comments_df.empty and 'comment_text' in comments_df.columns:
768
+ try:
769
+ text = " ".join(comment for comment in comments_df['comment_text'].astype(str))
770
+ text = clean_bengali_text(text)
771
+
772
+ # Join special phrases
773
+ for phrase, joined in PHRASES_TO_JOIN.items():
774
+ text = text.replace(phrase, joined)
775
+
776
+ # Extract and filter words
777
+ words = re.findall(r'[\u0980-\u09FF_]{2,}', text)
778
+ words = [w for w in words if w not in COMBINED_STOPWORDS]
779
+ words = [w for w in words if len(w) > 1]
780
+ words = [w for w in words if not re.search(r'[a-zA-Z]', w)]
781
+
782
+ # Filter by frequency
783
+ from collections import Counter
784
+ word_freq = Counter(words)
785
+ min_freq = 2
786
+ most_common = set([w for w, _ in word_freq.most_common(3)])
787
+ filtered_words = [w for w in words if word_freq[w] >= min_freq and w not in most_common]
788
+ wc_text = " ".join(filtered_words)
789
+
790
+ # Generate word cloud
791
+ if wc_text.strip():
792
+ wc = WordCloud(
793
+ font_path=FONT_PATH,
794
+ width=1600,
795
+ height=900,
796
+ background_color='white',
797
+ stopwords=COMBINED_STOPWORDS,
798
+ collocations=False,
799
+ colormap='plasma',
800
+ max_words=250,
801
+ contour_width=2,
802
+ contour_color='darkorange',
803
+ regexp=r"[\u0980-\u09FF_]+"
804
+ ).generate(wc_text)
805
+
806
+ fig_wc, ax = plt.subplots(figsize=(15, 8))
807
+ ax.imshow(wc, interpolation='bilinear')
808
+ ax.axis("off")
809
+ ax.set_title("Bengali Word Cloud from YouTube Comments", fontproperties=BANGLA_FONT, fontsize=22)
810
+ plt.tight_layout()
811
+ except Exception as e:
812
+ logger.error(f"YouTube WordCloud failed: {e}")
813
+ dashboard_components["yt_wordcloud_plot"] = fig_wc
814
+
815
+ # Top commented videos
816
+ fig_top_videos = None
817
+ if comments_df is not None and not comments_df.empty and 'video_title' in comments_df.columns:
818
+ top_videos = comments_df['video_title'].value_counts().nlargest(10)
819
+ if not top_videos.empty:
820
+ fig_top_videos, ax = plt.subplots(figsize=(10, 6))
821
+ top_videos.plot(kind='barh', ax=ax, color='dodgerblue')
822
+ ax.set_title("Top 10 Videos by Comment Count", fontproperties=BANGLA_FONT, fontsize=18)
823
+ ax.set_xlabel("মন্তব্য সংখ্যা", fontproperties=BANGLA_FONT, fontsize=14)
824
+ ax.set_ylabel("ভিডিও শিরোনাম", fontproperties=BANGLA_FONT, fontsize=14)
825
+ yticks = np.arange(len(top_videos.index))
826
+ ax.set_yticks(yticks)
827
+ ax.set_yticklabels(top_videos.index, fontproperties=BANGLA_FONT, fontsize=14)
828
+ for label in ax.get_xticklabels():
829
+ label.set_fontproperties(BANGLA_FONT)
830
+ label.set_fontsize(12)
831
+ for label in ax.get_yticklabels():
832
+ label.set_fontproperties(BANGLA_FONT)
833
+ label.set_fontsize(14)
834
+ legend = ax.get_legend()
835
+ if legend:
836
+ for text in legend.get_texts():
837
+ text.set_fontproperties(BANGLA_FONT)
838
+ plt.tight_layout()
839
+ dashboard_components["yt_top_videos_plot"] = fig_top_videos
840
+
841
+ # Engagement rate per video
842
+ fig_engagement = None
843
+ if videos_df is not None and not videos_df.empty and comments_df is not None and not comments_df.empty:
844
+ if 'video_id' in videos_df.columns and 'video_id' in comments_df.columns:
845
+ try:
846
+ # Count comments per video
847
+ comment_counts = comments_df['video_id'].value_counts().reset_index()
848
+ comment_counts.columns = ['video_id', 'comment_count']
849
+ # Ensure 'comment_count' column exists in videos_df
850
+ merged = videos_df.merge(comment_counts, on='video_id', how='left')
851
+ if 'comment_count' not in merged.columns:
852
+ merged['comment_count'] = 0
853
+ merged['comment_count'] = merged['comment_count'].fillna(0)
854
+ # Calculate engagement rate
855
+ merged['engagement_rate'] = merged['comment_count'] / merged['view_count'].replace(0, 1)
856
+ # Get top 10 videos by engagement
857
+ top_engagement = merged.nlargest(10, 'engagement_rate')
858
+ if not top_engagement.empty:
859
+ fig_engagement, ax = plt.subplots(figsize=(10, 6))
860
+ ax.barh(top_engagement['video_title'], top_engagement['engagement_rate'], color='mediumseagreen')
861
+ ax.set_title("Top 10 Videos by Engagement Rate", fontproperties=BANGLA_FONT, fontsize=18)
862
+ ax.set_xlabel("এনগেজমেন্ট রেট (মন্তব্য/ভিউ)", fontproperties=BANGLA_FONT, fontsize=14)
863
+ ax.set_ylabel("ভিডিও শিরোনাম", fontproperties=BANGLA_FONT, fontsize=14)
864
+ yticks = np.arange(len(top_engagement['video_title']))
865
+ ax.set_yticks(yticks)
866
+ ax.set_yticklabels(top_engagement['video_title'], fontproperties=BANGLA_FONT, fontsize=14)
867
+ for label in ax.get_xticklabels():
868
+ label.set_fontproperties(BANGLA_FONT)
869
+ label.set_fontsize(12)
870
+ for label in ax.get_yticklabels():
871
+ label.set_fontproperties(BANGLA_FONT)
872
+ label.set_fontsize(14)
873
+ legend = ax.get_legend()
874
+ if legend:
875
+ for text in legend.get_texts():
876
+ text.set_fontproperties(BANGLA_FONT)
877
+ plt.tight_layout()
878
+ except Exception as e:
879
+ logger.error(f"Engagement rate calculation failed: {e}")
880
+ dashboard_components["yt_engagement_plot"] = fig_engagement
881
+
882
+ # Comment activity over time
883
+ fig_time_series = None
884
+ if comments_df is not None and not comments_df.empty and 'published_date_comment' in comments_df.columns:
885
+ try:
886
+ comments_df['published_date_comment'] = pd.to_datetime(comments_df['published_date_comment'])
887
+ time_series = comments_df.set_index('published_date_comment').resample('D').size().reset_index()
888
+ time_series.columns = ['date', 'count']
889
+
890
+ if not time_series.empty:
891
+ fig_time_series = gr.LinePlot(
892
+ value=time_series,
893
+ x='date',
894
+ y='count',
895
+ title="Comment Activity Over Time",
896
+ tooltip=['date', 'count'],
897
+ x_title="Date",
898
+ y_title="Number of Comments"
899
+ )
900
+ except Exception as e:
901
+ logger.error(f"Error in comment activity plot: {e}")
902
+ dashboard_components["yt_time_series_plot"] = fig_time_series
903
+
904
+ return dashboard_components
905
+
906
+ # ==============================================================================
907
+ # GRADIO UI DEFINITION
908
+ # ==============================================================================
909
# ------------------------------------------------------------------------------
# First UI definition.
# NOTE(review): a second `with gr.Blocks(...) as app:` definition appears later
# in this file and rebinds `app`, so this first layout is presumably shadowed
# and never launched — confirm and delete one of the two.
# ------------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="orange"), title=APP_TITLE) as app:
    gr.Markdown(f"# {APP_TITLE}\n*{APP_TAGLINE}*")

    # --- STATE MANAGEMENT ---
    # Session-scoped holders for raw results. NOTE(review): to actually persist
    # values, these State components must be listed in an event's outputs —
    # the handlers below never do that, so these currently stay empty.
    scraper_results_state = gr.State()
    youtube_results_state = gr.State()

    with gr.Tabs():
        # ---------------- Tab 1: news scraping inputs and results ----------------
        with gr.TabItem("1. News Scraper", id=0):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Search Criteria")
                    search_keywords_textbox = gr.Textbox(
                        label="Search Keywords",
                        placeholder="e.g., বাংলাদেশ, নির্বাচন",
                        info="Keywords to search for in news articles."
                    )
                    sites_to_search_textbox = gr.Textbox(
                        label="Target Sites (Optional, comma-separated)",
                        placeholder="e.g., prothomalo.com",
                        info="Limit search to specific news sites."
                    )
                    start_date_textbox = gr.Textbox(
                        label="Start Date",
                        placeholder="YYYY-MM-DD or 'last week'",
                        info="Start date for news scraping."
                    )
                    end_date_textbox = gr.Textbox(
                        label="End Date",
                        placeholder="YYYY-MM-DD or 'today'",
                        info="End date for news scraping."
                    )

                    gr.Markdown("### Scraping Parameters")
                    interval_days_slider = gr.Slider(
                        1, 7, 3, step=1,
                        label="Days per Interval",
                        info="How many days to group each scraping interval."
                    )
                    max_pages_slider = gr.Slider(
                        1, 10, 5, step=1,
                        label="Max Pages per Interval",
                        info="Maximum number of pages to fetch per interval."
                    )
                    filter_keywords_textbox = gr.Textbox(
                        label="Filter Keywords (comma-separated, optional)",
                        placeholder="e.g., ডাকসু, নোবেল",
                        info="Filter results by these keywords."
                    )

                    start_scraper_button = gr.Button("Start Scraping & Analysis", variant="primary")
                    scraper_progress = gr.Progress()

                with gr.Column(scale=2):
                    # Output pane: filtered table plus CSV download.
                    scraper_results_df = gr.DataFrame(
                        label="Filtered Results",
                        interactive=True
                    )
                    scraper_download_file = gr.File(
                        label="Download Filtered Results CSV"
                    )

        # ---------------- Tab 2: static analytics dashboard shell ----------------
        with gr.TabItem("2. News Analytics", id=1):
            gr.Markdown("### News Analytics Dashboard")

            with gr.Group():
                # Static "Key Findings" card; the inner <span> ids are placeholders
                # and are not populated by any handler in this file's visible code.
                news_summary_card = gr.HTML(
                    "<div style='background:#f5f5f5;padding:16px;border-radius:12px;margin-bottom:12px;box-shadow:0 2px 8px #eee;'>"
                    "<h3 style='margin:0 0 8px 0;'>Key Findings</h3>"
                    "<ul style='margin:0;padding-left:18px;'>"
                    "<li><b>Total Articles:</b> <span id='news_total_articles'></span></li>"
                    "<li><b>Unique Media:</b> <span id='news_unique_media'></span></li>"
                    "<li><b>Date Range:</b> <span id='news_date_range'></span></li>"
                    "</ul></div>"
                )

            # KPI badges filled by scraper_button_handler.
            kpi_total_articles = gr.HTML()
            kpi_unique_media = gr.HTML()
            kpi_date_range = gr.HTML()

            with gr.Row():
                with gr.Column():
                    dashboard_timeline_plot = gr.LinePlot(
                        label="News Volume Timeline"
                    )
                with gr.Column():
                    dashboard_media_plot = gr.Plot(
                        label="Top Media Sources by Article Count"
                    )

            dashboard_wordcloud_plot = gr.Plot(
                label="Headline Word Cloud"
            )

        # ---------------- Tab 3: YouTube analysis inputs + dashboard ----------------
        with gr.TabItem("3. YouTube Topic Analysis", id=2):
            gr.Markdown("## YouTube Topic Analysis")

            with gr.Row():
                with gr.Column(scale=1):
                    yt_search_keywords = gr.Textbox(
                        label="YouTube Search Keywords",
                        placeholder="e.g., ক্রিকেট",
                        info="Keywords to search for in YouTube videos."
                    )
                    yt_max_videos_slider = gr.Slider(
                        10, 100, 30, step=5,
                        label="Max Videos for Stats",
                        info="Maximum number of videos to scan for statistics."
                    )
                    yt_num_videos_comments_slider = gr.Slider(
                        1, 20, 5, step=1,
                        label="Videos for Comments",
                        info="Number of top videos to scrape comments from."
                    )
                    yt_max_comments_slider = gr.Slider(
                        10, 200, 50, step=10,
                        label="Max Comments per Video",
                        info="Maximum number of comments to fetch per video."
                    )
                    yt_published_after = gr.Textbox(
                        label="Published After (Optional)",
                        placeholder="YYYY-MM-DD",
                        info="Only include videos published after this date."
                    )

                    start_youtube_analysis_button = gr.Button(
                        "Start YouTube Analysis",
                        variant="primary"
                    )
                    yt_progress = gr.Progress()

                with gr.Column(scale=2):
                    # Tables + download files for videos and comments.
                    yt_results_df = gr.DataFrame(
                        label="YouTube Video Results",
                        interactive=True
                    )
                    yt_videos_download_file = gr.File(
                        label="Download YouTube Video Results CSV"
                    )
                    yt_comments_df = gr.DataFrame(
                        label="YouTube Comments Results",
                        interactive=True
                    )
                    yt_comments_download_file = gr.File(
                        label="Download YouTube Comments CSV"
                    )
                    yt_dashboard_html = gr.HTML()
                    with gr.Group():
                        # KPI badges filled by youtube_button_handler.
                        kpi_yt_videos_found = gr.HTML()
                        kpi_yt_views_scanned = gr.HTML()
                        kpi_yt_comments_scraped = gr.HTML()
                    with gr.Row():
                        with gr.Column():
                            yt_channel_plot = gr.Plot(
                                label="Top Channels by Video Volume"
                            )
                            yt_channel_dominance_plot = gr.Plot(
                                label="Channel Dominance by View Count"
                            )
                        with gr.Column():
                            yt_time_series_plot = gr.LinePlot(
                                label="Comment Activity Over Time"
                            )
                    with gr.Row():
                        with gr.Column():
                            yt_top_videos_plot = gr.Plot(
                                label="Top Videos by Comment Count"
                            )
                            yt_content_quadrant_plot = gr.Plot(
                                label="Content Performance Quadrant"
                            )
                        with gr.Column():
                            yt_engagement_plot = gr.Plot(
                                label="Top Videos by Engagement Rate"
                            )
                            yt_wordcloud_plot = gr.Plot(
                                label="Bengali Word Cloud from Comments"
                            )
                    yt_detailed_summary = gr.HTML()
1089
+ # --- EVENT HANDLERS ---
1090
+ def scraper_button_handler(search_keywords, sites, start_date, end_date, interval, max_pages, filter_keys):
1091
+ """Handle news scraper button click event."""
1092
+ try:
1093
+ df, filtered_df = run_news_scraper_pipeline(
1094
+ search_keywords, sites, start_date, end_date,
1095
+ interval, max_pages, filter_keys
1096
+ )
1097
+
1098
+ # Update the state with the full results
1099
+ scraper_results_state = df
1100
+
1101
+ # Generate dashboard visualizations
1102
+ dashboard = generate_scraper_dashboard(df)
1103
+
1104
+ # Prepare download file for news results
1105
+ if not df.empty:
1106
+ csv_path = "news_results.csv"
1107
+ df.to_csv(csv_path, index=False)
1108
+ scraper_download_file = gr.File(value=csv_path, visible=True)
1109
+ else:
1110
+ scraper_download_file = gr.File(visible=False)
1111
+
1112
+ return (
1113
+ filtered_df,
1114
+ scraper_download_file,
1115
+ dashboard["kpi_total_articles"],
1116
+ dashboard["kpi_unique_media"],
1117
+ dashboard["kpi_date_range"],
1118
+ dashboard["dashboard_timeline_plot"],
1119
+ dashboard["dashboard_media_plot"],
1120
+ dashboard["dashboard_wordcloud_plot"]
1121
+ )
1122
+ except Exception as e:
1123
+ logger.error(f"Error in scraper button handler: {str(e)}")
1124
+ gr.Error(f"An error occurred during scraping: {str(e)}")
1125
+ # Return empty values to reset the UI
1126
+ return (
1127
+ pd.DataFrame(),
1128
+ gr.File(visible=False),
1129
+ gr.HTML(""), gr.HTML(""), gr.HTML(""),
1130
+ None, None, None
1131
+ )
1132
+
1133
    # Wire the scraper button: 7 inputs feed the handler's positional
    # parameters in order; the 8 outputs must match the handler's return
    # tuple element-for-element.
    start_scraper_button.click(
        fn=scraper_button_handler,
        inputs=[
            search_keywords_textbox,
            sites_to_search_textbox,
            start_date_textbox,
            end_date_textbox,
            interval_days_slider,
            max_pages_slider,
            filter_keywords_textbox
        ],
        outputs=[
            scraper_results_df,
            scraper_download_file,
            kpi_total_articles,
            kpi_unique_media,
            kpi_date_range,
            dashboard_timeline_plot,
            dashboard_media_plot,
            dashboard_wordcloud_plot
        ]
    )
1155
+
1156
+ def youtube_button_handler(keywords, max_videos, num_comments_videos, max_comments, published_after):
1157
+ """Handle YouTube analysis button click event."""
1158
+ try:
1159
+ videos_df, comments_df, summary_html = run_youtube_analysis_pipeline(
1160
+ api_key=None,
1161
+ query=keywords,
1162
+ max_videos_for_stats=max_videos,
1163
+ num_videos_for_comments=num_comments_videos,
1164
+ max_comments_per_video=max_comments,
1165
+ published_after=published_after
1166
+ )
1167
+ # Update the state with the results
1168
+ youtube_results_state = (videos_df, comments_df)
1169
+ # Prepare download files for YouTube results
1170
+ yt_videos_csv = "youtube_videos.csv"
1171
+ yt_comments_csv = "youtube_comments.csv"
1172
+ if not videos_df.empty:
1173
+ videos_df.to_csv(yt_videos_csv, index=False)
1174
+ yt_videos_download_file = gr.File(value=yt_videos_csv, visible=True)
1175
+ else:
1176
+ yt_videos_download_file = gr.File(visible=False)
1177
+ # For comments, add video title and channel if not present
1178
+ if not comments_df.empty:
1179
+ if "video_title" not in comments_df.columns and "video_id" in comments_df.columns:
1180
+ # Map video title from videos_df
1181
+ title_map = videos_df.set_index("video_id")["video_title"].to_dict()
1182
+ comments_df["video_title"] = comments_df["video_id"].map(title_map)
1183
+ if "channel" not in comments_df.columns and "channel_title" in comments_df.columns:
1184
+ comments_df["channel"] = comments_df["channel_title"]
1185
+ comments_df.to_csv(yt_comments_csv, index=False)
1186
+ yt_comments_download_file = gr.File(value=yt_comments_csv, visible=True)
1187
+ else:
1188
+ yt_comments_download_file = gr.File(visible=False)
1189
+ # Generate dashboard visualizations
1190
+ dashboard = generate_youtube_dashboard(videos_df, comments_df)
1191
+ return (
1192
+ videos_df,
1193
+ yt_videos_download_file,
1194
+ comments_df,
1195
+ yt_comments_download_file,
1196
+ summary_html,
1197
+ dashboard["kpi_yt_videos_found"],
1198
+ dashboard["kpi_yt_views_scanned"],
1199
+ dashboard["kpi_yt_comments_scraped"],
1200
+ dashboard["yt_channel_plot"],
1201
+ dashboard["yt_channel_dominance_plot"],
1202
+ dashboard["yt_time_series_plot"],
1203
+ dashboard["yt_top_videos_plot"],
1204
+ dashboard["yt_content_quadrant_plot"],
1205
+ dashboard["yt_engagement_plot"],
1206
+ dashboard["yt_wordcloud_plot"],
1207
+ dashboard["yt_detailed_summary"]
1208
+ )
1209
+ except Exception as e:
1210
+ logger.error(f"Error in YouTube button handler: {str(e)}")
1211
+ gr.Error(f"An error occurred during YouTube analysis: {str(e)}")
1212
+ # Return empty values to reset the UI (16 outputs)
1213
+ return (
1214
+ pd.DataFrame(), # yt_results_df
1215
+ gr.File(visible=False), # yt_videos_download_file
1216
+ pd.DataFrame(), # yt_comments_df
1217
+ gr.File(visible=False), # yt_comments_download_file
1218
+ gr.HTML(""), # yt_dashboard_html
1219
+ gr.HTML(""), # kpi_yt_videos_found
1220
+ gr.HTML(""), # kpi_yt_views_scanned
1221
+ gr.HTML(""), # kpi_yt_comments_scraped
1222
+ None, # yt_channel_plot
1223
+ None, # yt_channel_dominance_plot
1224
+ None, # yt_time_series_plot
1225
+ None, # yt_top_videos_plot
1226
+ None, # yt_content_quadrant_plot
1227
+ None, # yt_engagement_plot
1228
+ None, # yt_wordcloud_plot
1229
+ gr.HTML("") # yt_detailed_summary
1230
+ )
1231
+
1232
    # Wire the YouTube button: 5 inputs feed the handler's positional
    # parameters in order; the 16 outputs must match the handler's return
    # tuple element-for-element.
    start_youtube_analysis_button.click(
        fn=youtube_button_handler,
        inputs=[
            yt_search_keywords,
            yt_max_videos_slider,
            yt_num_videos_comments_slider,
            yt_max_comments_slider,
            yt_published_after
        ],
        outputs=[
            yt_results_df,
            yt_videos_download_file,
            yt_comments_df,
            yt_comments_download_file,
            yt_dashboard_html,
            kpi_yt_videos_found,
            kpi_yt_views_scanned,
            kpi_yt_comments_scraped,
            yt_channel_plot,
            yt_channel_dominance_plot,
            yt_time_series_plot,
            yt_top_videos_plot,
            yt_content_quadrant_plot,
            yt_engagement_plot,
            yt_wordcloud_plot,
            yt_detailed_summary
        ]
    )
1260
+
1261
+ # ==============================================================================
1262
+ # LAUNCH THE APP
1263
+ # ==============================================================================
1264
+ custom_css = """
1265
+ body, .gradio-container {
1266
+ background: #181a20 !important;
1267
+ font-family: 'Inter', 'Noto Sans', sans-serif;
1268
+ }
1269
+ .gr-card {
1270
+ background: #23263a;
1271
+ border-radius: 18px;
1272
+ box-shadow: 0 4px 24px rgba(0,0,0,0.12);
1273
+ padding: 24px;
1274
+ margin-bottom: 24px;
1275
+ }
1276
+ .gr-title {
1277
+ color: #fff;
1278
+ font-size: 2.2rem;
1279
+ font-weight: 700;
1280
+ margin-bottom: 12px;
1281
+ }
1282
+ .gr-metric {
1283
+ color: #22d3ee;
1284
+ font-size: 2.5rem;
1285
+ font-weight: 800;
1286
+ }
1287
+ .gr-label {
1288
+ color: #94a3b8;
1289
+ font-size: 1.1rem;
1290
+ margin-bottom: 6px;
1291
+ }
1292
+ .gradio-row, .gradio-column {
1293
+ background: transparent !important;
1294
+ }
1295
+ .gradio-button {
1296
+ border-radius: 8px !important;
1297
+ background: linear-gradient(90deg,#3b82f6,#22d3ee) !important;
1298
+ color: #fff !important;
1299
+ font-weight: 600 !important;
1300
+ box-shadow: 0 2px 8px rgba(34,211,238,0.08);
1301
+ transition: background 0.2s;
1302
+ }
1303
+ .gradio-button:hover {
1304
+ background: linear-gradient(90deg,#22d3ee,#3b82f6) !important;
1305
+ }
1306
+ .gradio-markdown h1, .gradio-markdown h2, .gradio-markdown h3 {
1307
+ color: #fff !important;
1308
+ }
1309
+ .gradio-markdown {
1310
+ color: #cbd5e1 !important;
1311
+ }
1312
+ """
1313
+
1314
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="orange"), title=APP_TITLE, css=custom_css) as app:
1315
+ gr.HTML("""
1316
+ <div class='gr-card' style='margin-bottom:32px;'>
1317
+ <div class='gr-title'>Social Perception Analyzer</div>
1318
+ <div style='color:#94a3b8;font-size:1.2rem;margin-bottom:8px;'>Prepared for the Policymakers of Bangladesh Nationalist Party (BNP)</div>
1319
+ <div style='color:#22d3ee;font-size:1rem;'>Developed by CDSR</div>
1320
+ </div>
1321
+ """)
1322
+ # --- STATE MANAGEMENT ---
1323
+ scraper_results_state = gr.State()
1324
+ youtube_results_state = gr.State()
1325
+
1326
+ with gr.Tabs():
1327
+ with gr.TabItem("1. News Scraper", id=0):
1328
+ gr.HTML("<div class='gr-card' style='margin-bottom:24px;'><h2>News Scraper</h2><p>Search and filter news articles from top Bangladeshi sources. Use advanced filters and download results.</p></div>")
1329
+ with gr.Row():
1330
+ with gr.Column(scale=1):
1331
+ gr.HTML("<div class='gr-card'><h3>Search Criteria</h3></div>")
1332
+ search_keywords_textbox = gr.Textbox(
1333
+ label="Search Keywords",
1334
+ placeholder="e.g., বিএনপি সমাবেশ",
1335
+ info="Keywords to search for in news articles."
1336
+ )
1337
+ sites_to_search_textbox = gr.Textbox(
1338
+ label="Target Sites (Optional, comma-separated)",
1339
+ placeholder="e.g., prothomalo.com",
1340
+ info="Limit search to specific news sites."
1341
+ )
1342
+ start_date_textbox = gr.Textbox(
1343
+ label="Start Date",
1344
+ placeholder="YYYY-MM-DD or 'last week'",
1345
+ info="Start date for news scraping."
1346
+ )
1347
+ end_date_textbox = gr.Textbox(
1348
+ label="End Date",
1349
+ placeholder="YYYY-MM-DD or 'today'",
1350
+ info="End date for news scraping."
1351
+ )
1352
+ gr.HTML("<div class='gr-card'><h3>Scraping Parameters</h3></div>")
1353
+ interval_days_slider = gr.Slider(
1354
+ 1, 7, 3, step=1,
1355
+ label="Days per Interval",
1356
+ info="How many days to group each scraping interval."
1357
+ )
1358
+ max_pages_slider = gr.Slider(
1359
+ 1, 10, 5, step=1,
1360
+ label="Max Pages per Interval",
1361
+ info="Maximum number of pages to fetch per interval."
1362
+ )
1363
+ filter_keywords_textbox = gr.Textbox(
1364
+ label="Filter Keywords (comma-separated, optional)",
1365
+ placeholder="e.g., নির্বাচন, সরকার",
1366
+ info="Filter results by these keywords."
1367
+ )
1368
+ start_scraper_button = gr.Button("Start Scraping & Analysis", variant="primary")
1369
+ scraper_progress = gr.Progress()
1370
+ with gr.Column(scale=2):
1371
+ gr.HTML("<div class='gr-card'><h3>Filtered Results</h3></div>")
1372
+ scraper_results_df = gr.DataFrame(
1373
+ label="Filtered Results",
1374
+ interactive=True
1375
+ )
1376
+ scraper_download_file = gr.File(
1377
+ label="Download Filtered Results CSV"
1378
+ )
1379
+ with gr.TabItem("2. News Analytics", id=1):
1380
+ gr.HTML("<div class='gr-card' style='margin-bottom:24px;'><h2>News Analytics Dashboard</h2><p>Visualize key metrics, trends, and top sources from scraped news data. All plots and metrics update dynamically.</p></div>")
1381
+ with gr.Row():
1382
+ with gr.Column(scale=1):
1383
+ gr.HTML("<div class='gr-card'><h3>Key Metrics</h3></div>")
1384
+ kpi_total_articles = gr.HTML()
1385
+ kpi_unique_media = gr.HTML()
1386
+ kpi_date_range = gr.HTML()
1387
+ with gr.Column(scale=2):
1388
+ gr.HTML("<div class='gr-card'><h3>Trends</h3></div>")
1389
+ dashboard_timeline_plot = gr.LinePlot(
1390
+ label="News Volume Timeline"
1391
+ )
1392
+ with gr.Row():
1393
+ with gr.Column(scale=1):
1394
+ gr.HTML("<div class='gr-card'><h3>Top Sources</h3></div>")
1395
+ dashboard_media_plot = gr.Plot(
1396
+ label="Top Media Sources by Article Count"
1397
+ )
1398
+ with gr.Column(scale=1):
1399
+ gr.HTML("<div class='gr-card'><h3>Headline Word Cloud</h3></div>")
1400
+ dashboard_wordcloud_plot = gr.Plot(
1401
+ label="Headline Word Cloud"
1402
+ )
1403
+ with gr.TabItem("3. YouTube Topic Analysis", id=2):
1404
+ gr.HTML("<div class='gr-card' style='margin-bottom:24px;'><h2>YouTube Topic Analysis</h2><p>Analyze YouTube video trends, engagement, and comment activity for your search topics.</p></div>")
1405
+ with gr.Row():
1406
+ with gr.Column(scale=1):
1407
+ gr.HTML("<div class='gr-card'><h3>Search Criteria</h3></div>")
1408
+ yt_search_keywords = gr.Textbox(
1409
+ label="YouTube Search Keywords",
1410
+ placeholder="e.g., BNP Rally",
1411
+ info="Keywords to search for in YouTube videos."
1412
+ )
1413
+ yt_max_videos_slider = gr.Slider(
1414
+ 10, 100, 30, step=5,
1415
+ label="Max Videos for Stats",
1416
+ info="Maximum number of videos to scan for statistics."
1417
+ )
1418
+ yt_num_videos_comments_slider = gr.Slider(
1419
+ 1, 20, 5, step=1,
1420
+ label="Videos for Comments",
1421
+ info="Number of top videos to scrape comments from."
1422
+ )
1423
+ yt_max_comments_slider = gr.Slider(
1424
+ 10, 200, 50, step=10,
1425
+ label="Max Comments per Video",
1426
+ info="Maximum number of comments to fetch per video."
1427
+ )
1428
+ yt_published_after = gr.Textbox(
1429
+ label="Published After (Optional)",
1430
+ placeholder="YYYY-MM-DD",
1431
+ info="Only include videos published after this date."
1432
+ )
1433
+ start_youtube_analysis_button = gr.Button(
1434
+ "Start YouTube Analysis",
1435
+ variant="primary"
1436
+ )
1437
+ yt_progress = gr.Progress()
1438
+ with gr.Column(scale=2):
1439
+ gr.HTML("<div class='gr-card'><h3>Video Results</h3></div>")
1440
+ yt_results_df = gr.DataFrame(
1441
+ label="YouTube Video Results",
1442
+ interactive=True
1443
+ )
1444
+ yt_videos_download_file = gr.File(
1445
+ label="Download YouTube Video Results CSV"
1446
+ )
1447
+ yt_comments_df = gr.DataFrame(
1448
+ label="YouTube Comments Results",
1449
+ interactive=True
1450
+ )
1451
+ yt_comments_download_file = gr.File(
1452
+ label="Download YouTube Comments CSV"
1453
+ )
1454
+ yt_dashboard_html = gr.HTML()
1455
+ with gr.Row():
1456
+ with gr.Column(scale=1):
1457
+ gr.HTML("<div class='gr-card'><h3>Top Channels & Engagement</h3></div>")
1458
+ kpi_yt_videos_found = gr.HTML()
1459
+ kpi_yt_views_scanned = gr.HTML()
1460
+ kpi_yt_comments_scraped = gr.HTML()
1461
+ yt_channel_plot = gr.Plot(
1462
+ label="Top Channels by Video Volume"
1463
+ )
1464
+ yt_channel_dominance_plot = gr.Plot(
1465
+ label="Channel Dominance by View Count"
1466
+ )
1467
+ yt_top_videos_plot = gr.Plot(
1468
+ label="Top Videos by Comment Count"
1469
+ )
1470
+ yt_content_quadrant_plot = gr.Plot(
1471
+ label="Content Performance Quadrant"
1472
+ )
1473
+ yt_engagement_plot = gr.Plot(
1474
+ label="Top Videos by Engagement Rate"
1475
+ )
1476
+ with gr.Column(scale=1):
1477
+ gr.HTML("<div class='gr-card'><h3>Comment Activity & Word Cloud</h3></div>")
1478
+ yt_time_series_plot = gr.LinePlot(
1479
+ label="Comment Activity Over Time"
1480
+ )
1481
+ yt_wordcloud_plot = gr.Plot(
1482
+ label="Bengali Word Cloud from Comments"
1483
+ )
1484
+ yt_detailed_summary = gr.HTML()
1485
+ # --- EVENT HANDLERS ---
1486
+ def scraper_button_handler(search_keywords, sites, start_date, end_date, interval, max_pages, filter_keys):
1487
+ """Handle news scraper button click event."""
1488
+ try:
1489
+ df, filtered_df = run_news_scraper_pipeline(
1490
+ search_keywords, sites, start_date, end_date,
1491
+ interval, max_pages, filter_keys
1492
+ )
1493
+ scraper_results_state = df
1494
+ dashboard = generate_scraper_dashboard(df)
1495
+ if not df.empty:
1496
+ csv_path = "news_results.csv"
1497
+ df.to_csv(csv_path, index=False)
1498
+ scraper_download_file = gr.File(value=csv_path, visible=True)
1499
+ else:
1500
+ scraper_download_file = gr.File(visible=False)
1501
+ return (
1502
+ filtered_df,
1503
+ scraper_download_file,
1504
+ dashboard["kpi_total_articles"],
1505
+ dashboard["kpi_unique_media"],
1506
+ dashboard["kpi_date_range"],
1507
+ dashboard["dashboard_timeline_plot"],
1508
+ dashboard["dashboard_media_plot"],
1509
+ dashboard["dashboard_wordcloud_plot"]
1510
+ )
1511
+ except Exception as e:
1512
+ logger.error(f"Error in scraper button handler: {str(e)}")
1513
+ gr.Error(f"An error occurred during scraping: {str(e)}")
1514
+ return (
1515
+ pd.DataFrame(),
1516
+ gr.File(visible=False),
1517
+ gr.HTML(""), gr.HTML(""), gr.HTML(""),
1518
+ None, None, None
1519
+ )
1520
+
1521
# Wire the scraper button: input components feed the handler in positional
# order; output components receive the handler's return tuple in the same order.
scraper_click_inputs = [
    search_keywords_textbox,
    sites_to_search_textbox,
    start_date_textbox,
    end_date_textbox,
    interval_days_slider,
    max_pages_slider,
    filter_keywords_textbox,
]
scraper_click_outputs = [
    scraper_results_df,
    scraper_download_file,
    kpi_total_articles,
    kpi_unique_media,
    kpi_date_range,
    dashboard_timeline_plot,
    dashboard_media_plot,
    dashboard_wordcloud_plot,
]
start_scraper_button.click(
    fn=scraper_button_handler,
    inputs=scraper_click_inputs,
    outputs=scraper_click_outputs,
)
1543
+
1544
def youtube_button_handler(keywords, max_videos, num_comments_videos, max_comments, published_after):
    """Run the YouTube analysis pipeline and build all dashboard outputs.

    Parameters mirror the Gradio input components wired in
    ``start_youtube_analysis_button.click``: search keywords, the three
    slider values, and the published-after date string.

    Returns a 16-tuple matching the ``outputs`` list of the click binding:
    videos DataFrame, videos download file, comments DataFrame, comments
    download file, summary HTML, three KPI HTML blocks, seven plots, and
    the detailed-summary HTML.  On failure the user is notified and empty
    placeholders are returned so the UI resets cleanly.
    """

    def _export_csv(frame, path):
        """Write *frame* to *path* and return a visible download component,
        or a hidden one when the frame is empty."""
        if frame.empty:
            return gr.File(visible=False)
        frame.to_csv(path, index=False)
        return gr.File(value=path, visible=True)

    try:
        videos_df, comments_df, summary_html = run_youtube_analysis_pipeline(
            api_key=None,  # the pipeline resolves the API key itself
            query=keywords,
            max_videos_for_stats=max_videos,
            num_videos_for_comments=num_comments_videos,
            max_comments_per_video=max_comments,
            published_after=published_after
        )
        # Removed the original's `youtube_results_state = (...)`: it only
        # bound a function-local name and never updated any gr.State.
        if not comments_df.empty:
            # Enrich the comments CSV with video titles and a normalized
            # channel column before export.
            if "video_title" not in comments_df.columns and "video_id" in comments_df.columns:
                # Bug fix: the original unconditionally indexed videos_df,
                # raising KeyError when it lacked these columns (e.g. empty
                # videos result alongside non-empty comments).
                if {"video_id", "video_title"}.issubset(videos_df.columns):
                    title_map = videos_df.set_index("video_id")["video_title"].to_dict()
                    comments_df["video_title"] = comments_df["video_id"].map(title_map)
            if "channel" not in comments_df.columns and "channel_title" in comments_df.columns:
                comments_df["channel"] = comments_df["channel_title"]
        yt_videos_download = _export_csv(videos_df, "youtube_videos.csv")
        yt_comments_download = _export_csv(comments_df, "youtube_comments.csv")
        dashboard = generate_youtube_dashboard(videos_df, comments_df)
        return (
            videos_df,
            yt_videos_download,
            comments_df,
            yt_comments_download,
            summary_html,
            dashboard["kpi_yt_videos_found"],
            dashboard["kpi_yt_views_scanned"],
            dashboard["kpi_yt_comments_scraped"],
            dashboard["yt_channel_plot"],
            dashboard["yt_channel_dominance_plot"],
            dashboard["yt_time_series_plot"],
            dashboard["yt_top_videos_plot"],
            dashboard["yt_content_quadrant_plot"],
            dashboard["yt_engagement_plot"],
            dashboard["yt_wordcloud_plot"],
            dashboard["yt_detailed_summary"],
        )
    except Exception as e:
        logger.error(f"Error in YouTube button handler: {str(e)}")
        # Bug fix: the original constructed gr.Error(...) without raising it,
        # which shows nothing in the UI.  gr.Warning() displays a toast when
        # *called*, so the user sees the message while the fallback outputs
        # below still reset every bound component.
        gr.Warning(f"An error occurred during YouTube analysis: {str(e)}")
        return (
            pd.DataFrame(),          # yt_results_df
            gr.File(visible=False),  # yt_videos_download_file
            pd.DataFrame(),          # yt_comments_df
            gr.File(visible=False),  # yt_comments_download_file
            gr.HTML(""),             # yt_dashboard_html
            gr.HTML(""),             # kpi_yt_videos_found
            gr.HTML(""),             # kpi_yt_views_scanned
            gr.HTML(""),             # kpi_yt_comments_scraped
            None,                    # yt_channel_plot
            None,                    # yt_channel_dominance_plot
            None,                    # yt_time_series_plot
            None,                    # yt_top_videos_plot
            None,                    # yt_content_quadrant_plot
            None,                    # yt_engagement_plot
            None,                    # yt_wordcloud_plot
            gr.HTML(""),             # yt_detailed_summary
        )
1613
+
1614
# Wire the YouTube analysis button: inputs feed the handler positionally and
# outputs receive the handler's 16-element return tuple in the same order.
yt_click_inputs = [
    yt_search_keywords,
    yt_max_videos_slider,
    yt_num_videos_comments_slider,
    yt_max_comments_slider,
    yt_published_after,
]
yt_click_outputs = [
    yt_results_df,
    yt_videos_download_file,
    yt_comments_df,
    yt_comments_download_file,
    yt_dashboard_html,
    kpi_yt_videos_found,
    kpi_yt_views_scanned,
    kpi_yt_comments_scraped,
    yt_channel_plot,
    yt_channel_dominance_plot,
    yt_time_series_plot,
    yt_top_videos_plot,
    yt_content_quadrant_plot,
    yt_engagement_plot,
    yt_wordcloud_plot,
    yt_detailed_summary,
]
start_youtube_analysis_button.click(
    fn=youtube_button_handler,
    inputs=yt_click_inputs,
    outputs=yt_click_outputs,
)
1642
if __name__ == "__main__":
    # Launch the Gradio app when run as a script: `share=True` requests a
    # temporary public URL, `debug=True` streams server logs to the console.
    app.launch(share=True, debug=True)