Arjon07CSE committed on
Commit d32b198 · verified · 1 Parent(s): a60fb4c

updated word cloud

Files changed (1)
  1. app.py +304 -167
app.py CHANGED
@@ -8,7 +8,6 @@ import gradio as gr
8
  import pandas as pd
9
  import numpy as np
10
  import torch
11
- import re
12
  import sqlite3
13
  import json
14
  import logging
@@ -21,15 +20,12 @@ from io import StringIO
21
  from datetime import datetime, timezone
22
  from logging.handlers import RotatingFileHandler
23
 
24
- # --- APIs and Web Scraping ---
25
- from googleapiclient.discovery import build
26
- from googleapiclient.errors import HttpError
27
- from GoogleNews import GoogleNews
28
- from urllib.error import HTTPError
29
  import dateparser
30
 
31
  # --- NLP & Machine Learning ---
32
- from transformers import pipeline, BitsAndBytesConfig
33
  from sentence_transformers import SentenceTransformer
34
  from huggingface_hub.utils import HfHubHTTPError
35
 
@@ -54,8 +50,8 @@ logger.info("Application starting up.")
54
 
55
  # --- APPLICATION CONFIGURATION ---
56
  APP_TITLE = "Social Perception Analyzer"
57
- APP_TAGLINE = "Prepared for the Policymakers of Bangladesh Nationalist Party (BNP)"
58
- APP_FOOTER = "Developed by CDSR"
59
 
60
  # --- FONT CONFIGURATION ---
61
  FONT_PATH = 'NotoSansBengali-Regular.ttf'
@@ -69,6 +65,30 @@ except OSError:
69
 
70
  # ==============================================================================
71
  # CORE HELPER FUNCTIONS
72
  # ==============================================================================
73
 
74
  BANGLA_STOP_WORDS = [
@@ -101,34 +121,23 @@ def get_dynamic_time_agg(start_date, end_date):
101
 
102
  # ==============================================================================
103
  # ML MODEL MANAGEMENT
104
- # ==============================================================================
105
-
106
 
107
- SENTIMENT_MODEL_ID = 'ahs95/banglabert-sentiment-analysis'
108
- MODELS = {"sentiment_pipeline": None}
109
-
110
- def _load_pipeline_with_retry(task, model_id, retries=3):
111
- logger.info(f"Initializing {task} pipeline for model: {model_id}")
112
- for attempt in range(retries):
113
  try:
114
- device = 0 if torch.cuda.is_available() else -1
115
- if device == -1: gr.Warning(f"{model_id} will run on CPU and may be very slow.")
116
- pipe = pipeline(task, model=model_id, device=device)
117
- logger.info(f"Pipeline '{task}' loaded successfully.")
118
- return pipe
119
- except (HfHubHTTPError, requests.exceptions.ConnectionError) as e:
120
- logger.warning(f"Network error on loading {model_id} (Attempt {attempt + 1}/{retries}): {e}")
121
- if attempt < retries - 1: time.sleep(5)
122
- else: raise gr.Error(f"Failed to download model '{model_id}' after {retries} attempts. Check network.")
123
  except Exception as e:
124
- logger.error(f"An unexpected error occurred while loading {model_id}: {e}")
125
- raise gr.Error(f"Could not initialize model '{model_id}'. Error: {e}")
126
- return None
 
127
 
128
- def get_sentiment_pipeline():
129
- if MODELS["sentiment_pipeline"] is None:
130
- MODELS["sentiment_pipeline"] = _load_pipeline_with_retry("sentiment-analysis", SENTIMENT_MODEL_ID)
131
- return MODELS["sentiment_pipeline"]
132
 
133
  # ==============================================================================
134
  # NEWS SCRAPER BACKEND
@@ -148,35 +157,35 @@ def run_news_scraper_pipeline(search_keywords, sites, start_date_str, end_date_s
148
 
149
  all_articles, current_dt = [], start_dt
150
  while current_dt <= end_dt:
151
- interval_end_dt = min(current_dt + pd.Timedelta(days=interval - 1), end_dt)
152
- start_str, end_str = current_dt.strftime('%Y-%m-%d'), interval_end_dt.strftime('%Y-%m-%d')
153
- progress(0, desc=f"Fetching news from {start_str} to {end_str}")
154
-
155
- site_query = f"({' OR '.join(['site:' + s.strip() for s in sites.split(',') if s.strip()])})" if sites else ""
156
- final_query = f'"{search_keywords}" {site_query} after:{start_str} before:{end_str}'
157
-
158
- googlenews = GoogleNews(lang='bn', region='BD')
159
- googlenews.search(final_query)
160
-
161
- for page in range(1, max_pages + 1):
162
- try:
163
- results = googlenews.results()
164
- if not results: break
165
- all_articles.extend(results)
166
- if page < max_pages:
167
- googlenews.getpage(page + 1)
168
- time.sleep(random.uniform(2, 5))
169
- except HTTPError as e:
170
- if e.code == 429:
171
- wait_time = random.uniform(15, 30)
172
- gr.Warning(f"Rate limited by Google News. Pausing for {wait_time:.0f} seconds.")
173
- time.sleep(wait_time)
174
- else:
175
- logger.error(f"HTTP Error fetching news: {e}"); break
176
- except Exception as e:
177
- logger.error(f"An error occurred fetching news: {e}"); break
178
-
179
- current_dt += pd.Timedelta(days=interval)
180
 
181
  if not all_articles: return pd.DataFrame(), pd.DataFrame()
182
 
@@ -185,8 +194,40 @@ def run_news_scraper_pipeline(search_keywords, sites, start_date_str, end_date_s
185
  df.dropna(subset=['published_date', 'title'], inplace=True)
186
 
187
  if filter_keys and filter_keys.strip():
188
- keywords = [k.strip().lower() for k in filter_keys.split(',')]
189
- mask = df.apply(lambda row: any(key in str(row['title']).lower() or key in str(row['desc']).lower() for key in keywords), axis=1)
190
  df = df[mask]
191
 
192
  return df, df[['published_date', 'title', 'media', 'desc', 'link']].sort_values(by='published_date', ascending=False)
@@ -235,7 +276,8 @@ def _scrape_single_video_comments(youtube_service, video_id, max_comments):
235
  return comments_list
236
 
237
  def run_youtube_analysis_pipeline(api_key, query, max_videos_for_stats, num_videos_for_comments, max_comments_per_video, published_after, progress=gr.Progress()):
238
- if not api_key: raise gr.Error("YouTube API Key is required.")
239
  if not query: raise gr.Error("Search Keywords are required.")
240
  try:
241
  youtube = build('youtube', 'v3', developerKey=api_key)
@@ -307,22 +349,7 @@ def set_plot_style():
307
  plt.rcParams['figure.dpi'] = 100
308
 
309
  def run_sentiment_analysis(df: pd.DataFrame, text_column: str, progress=gr.Progress()):
310
- if text_column not in df.columns: return df
311
- sentiment_pipeline = get_sentiment_pipeline()
312
- if not sentiment_pipeline:
313
- gr.Warning("Sentiment model failed to load. Skipping analysis.")
314
- return df
315
-
316
- texts = df[text_column].dropna().tolist()
317
- if not texts: return df
318
-
319
- progress(0, desc="Running sentiment analysis...")
320
- results = sentiment_pipeline(texts, batch_size=32)
321
-
322
- text_to_sentiment = {text: result for text, result in zip(texts, results)}
323
- df['sentiment_label'] = df[text_column].map(lambda x: text_to_sentiment.get(x, {}).get('label'))
324
- df['sentiment_score'] = df[text_column].map(lambda x: text_to_sentiment.get(x, {}).get('score'))
325
- logger.info("Sentiment analysis complete.")
326
  return df
327
 
328
  def generate_scraper_dashboard(df: pd.DataFrame):
@@ -343,11 +370,41 @@ def generate_scraper_dashboard(df: pd.DataFrame):
343
  ax.set_yticklabels(media_counts.index, fontproperties=BANGLA_FONT); ax.set_xlabel("Article Count"); plt.tight_layout()
344
 
345
  text = " ".join(title for title in df['title'].astype(str))
 
 
 
346
  fig_wc = None
347
  try:
348
- wc = WordCloud(font_path=FONT_PATH, width=800, height=400, background_color='white', stopwords=BANGLA_STOP_WORDS, collocations=False).generate(text)
349
- fig_wc, ax = plt.subplots(figsize=(10, 5)); ax.imshow(wc, interpolation='bilinear'); ax.axis("off")
350
- except Exception as e: logger.error(f"WordCloud failed: {e}")
351
 
352
  return {
353
  kpi_total_articles: str(total_articles), kpi_unique_media: str(unique_media), kpi_date_range: date_range_str,
@@ -356,74 +413,144 @@ def generate_scraper_dashboard(df: pd.DataFrame):
356
  }
357
 
358
  def generate_sentiment_dashboard(df: pd.DataFrame):
359
- updates = {sentiment_dashboard_tab: gr.update(visible=False)}
360
- set_plot_style()
361
-
362
- if 'sentiment_label' in df.columns:
363
- sentiment_counts = df['sentiment_label'].value_counts()
364
- fig_pie, fig_media_sent = None, None
365
- if not sentiment_counts.empty:
366
- fig_pie, ax = plt.subplots(figsize=(6, 6)); ax.pie(sentiment_counts, labels=sentiment_counts.index, autopct='%1.1f%%', startangle=90, colors=['#66c2a5', '#fc8d62', '#8da0cb'])
367
- ax.set_title("Overall Sentiment Distribution", fontproperties=BANGLA_FONT); ax.axis('equal')
368
-
369
- top_media = df['media'].value_counts().nlargest(10).index
370
- media_sentiment = pd.crosstab(df[df['media'].isin(top_media)]['media'], df['sentiment_label'], normalize='index').mul(100)
371
- if not media_sentiment.empty:
372
- fig_media_sent, ax = plt.subplots(figsize=(10, 7)); media_sentiment.plot(kind='barh', stacked=True, ax=ax, colormap='viridis')
373
- ax.set_title("Sentiment by Top Media Sources", fontproperties=BANGLA_FONT); ax.set_yticklabels(media_sentiment.index, fontproperties=BANGLA_FONT); plt.tight_layout()
374
-
375
- updates.update({sentiment_pie_plot: fig_pie, sentiment_by_media_plot: fig_media_sent, sentiment_dashboard_tab: gr.update(visible=True)})
376
- return updates
377
 
378
  def generate_youtube_dashboard(videos_df, comments_df):
379
  set_plot_style()
380
  kpis = {
381
- kpi_yt_videos_found: f"{len(videos_df):,}" if videos_df is not None else "0",
382
- kpi_yt_views_scanned: f"{videos_df['view_count'].sum():,}" if videos_df is not None else "0",
383
- kpi_yt_comments_scraped: f"{len(comments_df):,}" if comments_df is not None else "0"
384
  }
385
 
386
- channel_counts = videos_df['channel'].value_counts().nlargest(15).sort_values()
387
- fig_channels, ax = plt.subplots(figsize=(8, 6))
388
- if not channel_counts.empty:
389
- channel_counts.plot(kind='barh', ax=ax, color='coral'); ax.set_title("Top 15 Channels by Video Volume", fontproperties=BANGLA_FONT); ax.set_yticklabels(channel_counts.index, fontproperties=BANGLA_FONT); plt.tight_layout()
390
-
391
- fig_wc, fig_pie, fig_sentiment_video = None, None, None
392
  if comments_df is not None and not comments_df.empty:
393
- text = " ".join(comment for comment in comments_df['comment_text'].astype(str))
394
- try:
395
- wc = WordCloud(font_path=FONT_PATH, width=800, height=400, background_color='white', stopwords=BANGLA_STOP_WORDS, collocations=False).generate(text)
396
- fig_wc, ax = plt.subplots(figsize=(10, 5)); ax.imshow(wc, interpolation='bilinear'); ax.axis("off"); ax.set_title("Most Common Words in Comments", fontproperties=BANGLA_FONT)
397
- except Exception as e: logger.error(f"YouTube WordCloud failed: {e}")
398
-
399
- if 'sentiment_label' in comments_df.columns:
400
- sentiment_counts = comments_df['sentiment_label'].value_counts()
401
- if not sentiment_counts.empty:
402
- fig_pie, ax = plt.subplots(figsize=(6, 6)); ax.pie(sentiment_counts, labels=sentiment_counts.index, autopct='%1.1f%%', startangle=90, colors=['#66c2a5', '#fc8d62', '#8da0cb']); ax.set_title("Overall Comment Sentiment", fontproperties=BANGLA_FONT)
403
-
404
- top_videos_by_comment = comments_df['video_title'].value_counts().nlargest(10).index
405
- video_sentiment = comments_df.groupby('video_title')['sentiment_label'].value_counts(normalize=True).unstack().mul(100).reindex(top_videos_by_comment).dropna(how='all')
406
- if not video_sentiment.empty:
407
- fig_sentiment_video, ax = plt.subplots(figsize=(10, 8)); video_sentiment.plot(kind='barh', stacked=True, ax=ax, colormap='viridis'); ax.set_title("Comment Sentiment by Top 10 Videos", fontproperties=BANGLA_FONT); ax.set_yticklabels(video_sentiment.index, fontproperties=BANGLA_FONT); plt.tight_layout()
408
 
409
- return {**kpis, yt_channel_plot: fig_channels, yt_wordcloud_plot: fig_wc, yt_sentiment_pie_plot: fig_pie, yt_sentiment_by_video_plot: fig_sentiment_video}
410
 
411
  def generate_youtube_topic_dashboard(videos_df_full_scan: pd.DataFrame):
412
  if videos_df_full_scan is None or videos_df_full_scan.empty: return None, None, None
413
  set_plot_style()
414
 
415
  channel_views = videos_df_full_scan.groupby('channel')['view_count'].sum().nlargest(15).sort_values()
416
- fig_channel_views, ax = plt.subplots(figsize=(10, 7)); channel_views.plot(kind='barh', ax=ax, color='purple'); ax.set_title("Channel Dominance by Total Views (Top 15)", fontproperties=BANGLA_FONT); ax.set_xlabel("Combined Views on Topic"); ax.set_yticklabels(channel_views.index, fontproperties=BANGLA_FONT); plt.tight_layout()
417
 
418
  df_sample = videos_df_full_scan.sample(n=min(len(videos_df_full_scan), 200))
419
  avg_views, avg_engagement = df_sample['view_count'].median(), df_sample['engagement_rate'].median()
420
  fig_quadrant, ax = plt.subplots(figsize=(10, 8)); sns.scatterplot(data=df_sample, x='view_count', y='engagement_rate', size='like_count', sizes=(20, 400), hue='channel', alpha=0.7, ax=ax, legend=False)
421
  ax.set_xscale('log'); ax.set_yscale('log'); ax.set_title("Content Performance Quadrant", fontproperties=BANGLA_FONT); ax.set_xlabel("Video Views (Log Scale)", fontproperties=BANGLA_FONT); ax.set_ylabel("Engagement Rate (Log Scale)", fontproperties=BANGLA_FONT)
422
- ax.axhline(avg_engagement, ls='--', color='gray'); ax.axvline(avg_views, ls='--', color='gray'); ax.text(avg_views*1.1, ax.get_ylim()[1], 'High Performers', color='green', fontproperties=BANGLA_FONT); ax.text(ax.get_xlim()[0], avg_engagement*1.1, 'Niche Stars', color='blue', fontproperties=BANGLA_FONT)
423
 
424
  fig_age, ax = plt.subplots(figsize=(10, 7)); sns.scatterplot(data=df_sample, x='published_date', y='view_count', size='engagement_rate', sizes=(20, 400), alpha=0.6, ax=ax)
425
- ax.set_yscale('log'); ax.set_title("Content Age vs. Impact", fontproperties=BANGLA_FONT); ax.set_xlabel("Publication Date", fontproperties=BANGLA_FONT); ax.set_ylabel("Views (Log Scale)", fontproperties=BANGLA_FONT); plt.xticks(rotation=45)
426
-
427
  return fig_channel_views, fig_quadrant, fig_age
428
 
429
  # ==============================================================================
@@ -442,14 +569,14 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="orange"),
442
  with gr.Row():
443
  with gr.Column(scale=1):
444
  gr.Markdown("### 1. Search Criteria")
445
- search_keywords_textbox = gr.Textbox(label="Search Keywords", placeholder="e.g., বিএনপি সমাবেশ")
446
  sites_to_search_textbox = gr.Textbox(label="Target Sites (Optional, comma-separated)", placeholder="e.g., prothomalo.com")
447
  start_date_textbox = gr.Textbox(label="Start Date", placeholder="YYYY-MM-DD or 'last week'")
448
  end_date_textbox = gr.Textbox(label="End Date", placeholder="YYYY-MM-DD or 'today'")
449
  gr.Markdown("### 2. Scraping Parameters")
450
  interval_days_slider = gr.Slider(1, 7, 3, step=1, label="Days per Interval")
451
  max_pages_slider = gr.Slider(1, 10, 5, step=1, label="Max Pages per Interval")
452
- filter_keywords_textbox = gr.Textbox(label="Filter Keywords (comma-separated, optional)", placeholder="e.g., নির্বাচন, সরকার")
453
  start_scraper_button = gr.Button("Start Scraping & Analysis", variant="primary")
454
  with gr.Column(scale=2):
455
  scraper_results_df = gr.DataFrame(label="Filtered Results", interactive=False, wrap=True)
@@ -475,33 +602,33 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="orange"),
475
  with gr.TabItem("3. YouTube Topic Analysis", id=2):
476
  with gr.Row():
477
  with gr.Column(scale=1):
478
- gr.Markdown("### 1. YouTube API & Search")
479
- yt_api_key = gr.Textbox(label="YouTube API Key", type="password", placeholder="Paste your API key")
480
- yt_search_keywords = gr.Textbox(label="Search Keywords", placeholder="e.g., বিএনপি, তারেক রহমান")
481
  yt_published_after = gr.Textbox(label="Published After Date (Optional)", placeholder="YYYY-MM-DD or '1 month ago'")
482
- gr.Markdown("### 2. Analysis Parameters")
483
  yt_max_videos_for_stats = gr.Slider(label="Videos to Scan for Topic Stats (Broad Scan)", minimum=50, maximum=750, value=300, step=50)
484
  yt_num_videos_for_comments = gr.Slider(label="Top Videos for Comment Analysis (Deep Dive)", minimum=5, maximum=100, value=25, step=5)
485
  yt_max_comments = gr.Slider(10, 100, 30, step=10, label="Max Comments per Video")
486
  start_yt_analysis_button = gr.Button("Start YouTube Analysis", variant="primary")
487
  with gr.Column(scale=2):
488
  with gr.Group(visible=False) as yt_dashboard_group:
489
- gr.Markdown("### Topic Footprint KPIs (Based on Broad Scan)")
490
  with gr.Row():
491
  kpi_yt_total_topic_videos = gr.Textbox(label="Est. Total Videos on Topic (YT)", interactive=False)
492
  kpi_yt_videos_found = gr.Textbox(label="Videos Scanned for Stats", interactive=False)
493
  kpi_yt_views_scanned = gr.Textbox(label="Combined Views (of Scanned)", interactive=False)
494
  kpi_yt_comments_scraped = gr.Textbox(label="Comments Analyzed (from Top Videos)", interactive=False)
495
  with gr.Tabs():
496
- with gr.TabItem("Deep Dive Analysis (on Top Videos)"):
497
  yt_videos_df_output = gr.DataFrame(label="Top Videos Analyzed for Comments (sorted by views)")
498
- with gr.Row():
499
- yt_channel_plot = gr.Plot(label="Channel Contribution by Video Count")
500
- yt_sentiment_pie_plot = gr.Plot(label="Overall Comment Sentiment")
501
- with gr.Row():
502
- yt_wordcloud_plot = gr.Plot(label="Comment Word Cloud")
503
- yt_sentiment_by_video_plot = gr.Plot(label="Comment Sentiment by Video")
504
- with gr.TabItem("Topic-Level Analytics (on All Scanned Videos)"):
505
  yt_channel_views_plot = gr.Plot(label="Channel Dominance by Views")
506
  yt_performance_quadrant_plot = gr.Plot(label="Content Performance Quadrant")
507
  yt_content_age_plot = gr.Plot(label="Content Age vs. Impact")
@@ -534,11 +661,22 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="orange"),
534
 
535
  def update_news_dashboards(analyzed_df):
536
  if analyzed_df is None or analyzed_df.empty:
537
- return {scraper_dashboard_group: gr.update(visible=False), sentiment_dashboard_tab: gr.update(visible=False)}
538
-
539
  scraper_updates = generate_scraper_dashboard(analyzed_df)
540
  sentiment_updates = generate_sentiment_dashboard(analyzed_df)
541
- return {**scraper_updates, **sentiment_updates}
542
 
543
  news_ui_components = [
544
  scraper_dashboard_group, kpi_total_articles, kpi_unique_media, kpi_date_range,
@@ -572,26 +710,25 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="orange"),
572
 
573
  def update_youtube_dashboards(results_data):
574
  if not results_data or results_data.get("full_scan") is None or results_data["full_scan"].empty:
575
- return {
576
- yt_dashboard_group: gr.update(visible=False), kpi_yt_total_topic_videos: "0",
577
- kpi_yt_videos_found: "0", kpi_yt_views_scanned: "0", kpi_yt_comments_scraped: "0",
578
- yt_channel_plot: None, yt_wordcloud_plot: None, yt_sentiment_pie_plot: None,
579
- yt_sentiment_by_video_plot: None, yt_channel_views_plot: None,
580
- yt_performance_quadrant_plot: None, yt_content_age_plot: None
581
- }
582
-
583
  videos_df_full, comments_df, total_estimate = results_data.get("full_scan"), results_data.get("comments"), results_data.get("total_estimate", 0)
584
  deep_dive_updates = generate_youtube_dashboard(videos_df_full, comments_df)
585
  fig_ch_views, fig_quad, fig_age = generate_youtube_topic_dashboard(videos_df_full)
586
-
587
- return {
588
- yt_dashboard_group: gr.update(visible=True),
589
- kpi_yt_total_topic_videos: f"{total_estimate:,}",
590
- **deep_dive_updates,
591
- yt_channel_views_plot: fig_ch_views,
592
- yt_performance_quadrant_plot: fig_quad,
593
- yt_content_age_plot: fig_age,
594
- }
595
 
596
  yt_ui_components = [
597
  yt_dashboard_group, kpi_yt_total_topic_videos, kpi_yt_videos_found, kpi_yt_views_scanned, kpi_yt_comments_scraped,
@@ -613,6 +750,6 @@ if __name__ == "__main__":
613
  logger.info("Using authentication credentials from environment variable.")
614
  else:
615
  logger.warning("No AUTH_CREDENTIALS found. Using default insecure credentials. Set this as an environment variable for production.")
616
- auth_tuple = ("bnp", "12345")
617
 
618
  app.launch(debug=True, auth=auth_tuple)
 
8
  import pandas as pd
9
  import numpy as np
10
  import torch
 
11
  import sqlite3
12
  import json
13
  import logging
 
20
  from datetime import datetime, timezone
21
  from logging.handlers import RotatingFileHandler
22
 
23
+ # --- NLP & Machine Learning ---
24
+ # BanglaBERT tokenizer removed for simplicity
25
  import dateparser
26
 
27
  # --- NLP & Machine Learning ---
28
+ from transformers import pipeline, BitsAndBytesConfig, AutoTokenizer
29
  from sentence_transformers import SentenceTransformer
30
  from huggingface_hub.utils import HfHubHTTPError
31
 
 
50
 
51
  # --- APPLICATION CONFIGURATION ---
52
  APP_TITLE = "Social Perception Analyzer"
53
+ APP_TAGLINE = "A flatform for understanding Netizens dynamics"
54
+ APP_FOOTER = "Developed by Arjon"
55
 
56
  # --- FONT CONFIGURATION ---
57
  FONT_PATH = 'NotoSansBengali-Regular.ttf'
 
65
 
66
  # ==============================================================================
67
  # CORE HELPER FUNCTIONS
68
+ def clean_bengali_text(text):
69
+ # Remove non-Bengali characters except spaces and underscores (for joined phrases)
70
+ # Preserve word shapes by not removing valid combining marks
71
+ cleaned = re.sub(r'[^\u0980-\u09FF_\s]', '', str(text))
72
+ # Remove extra spaces
73
+ cleaned = re.sub(r'\s+', ' ', cleaned).strip()
74
+ return cleaned
75
+ NOTEBOOK_STOPWORDS = set([
76
+ 'এবং', 'ও', 'বা', 'কিংবা', 'অথবা', 'কিন্তু', 'এর', 'এ', 'এই', 'সেই', 'ওই', 'এক', 'জন্য',
77
+ 'আমার', 'তোমার', 'তার', 'আমাদের', 'তাদের', 'সে', 'তিনি', 'আমি', 'তুমি', 'যে', 'যায়', 'হয়',
78
+ 'হবে', 'ছিল', 'আছে', 'নেই', 'এটা', 'ওটা', 'সেটা', 'করে', 'করতে', 'করেছে', 'করছেন', 'থেকে',
79
+ 'সাথে', 'মধ্যে', 'উপরে', 'নিচে', 'পরে', 'আগে', 'শুধু', 'খুব', 'অনেক', 'আরও', 'হিসাবে', 'তাহলে',
80
+ 'হলে', 'তাই', 'সুতরাং', 'কারণে', 'একটি', 'হয়ে', 'হয়েছিল', 'হচ্ছে', 'হয়েছে', 'না', 'হ্যাঁ', 'কি',
81
+ 'কী', 'কে', 'কোন', 'গুলো', 'কিছু', 'বলেন', 'বললেন', 'বলল', 'আর', 'ভাই', 'হোক', 'চাই', 'বাদ',
82
+ 'দিতে', 'দিয়ে', 'দিলেন', 'দেন', 'যাবে', 'যাক', 'পারা', 'পারে', 'করা', 'করি', 'করার', 'করছে',
83
+ 'করবে', 'সব', 'এখন', 'যদি', 'কেন', 'কবে', 'কেমন', 'ইনশাআল্লাহ', 'আপনি', 'আপনার', 'আপনারা', 'আমরা'
84
+ ])
85
+ COMBINED_STOPWORDS = set(BANGLA_STOP_WORDS) | NOTEBOOK_STOPWORDS
86
+ PHRASES_TO_JOIN = {
87
+ "তারেক রহমান": "তারেক_রহমান",
88
+ "খালেদা জিয়া": "খালেদা_জিয়া",
89
+ "বিএনপি জিন্দাবাদ": "বিএনপি_জিন্দাবাদ"
90
+ # Add more as needed
91
+ }
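A quick standalone sanity check of how these new helpers compose. Note that this commit also deletes the module-level `import re` in the imports hunk above, yet `clean_bengali_text` still calls `re.sub`, so the import presumably needs to be restored:

```python
import re  # required by clean_bengali_text; the diff removes this import

PHRASES_TO_JOIN = {"তারেক রহমান": "তারেক_রহমান"}

def clean_bengali_text(text):
    # Keep only Bengali code points (U+0980-U+09FF), underscores, and whitespace.
    cleaned = re.sub(r'[^\u0980-\u09FF_\s]', '', str(text))
    return re.sub(r'\s+', ' ', cleaned).strip()

text = clean_bengali_text("তারেক রহমান (BNP) বক্তব্য দেন!")
for phrase, joined in PHRASES_TO_JOIN.items():
    text = text.replace(phrase, joined)
print(text)  # -> তারেক_রহমান বক্তব্য দেন
```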
92
  # ==============================================================================
93
 
94
  BANGLA_STOP_WORDS = [
 
121
 
122
  # ==============================================================================
123
  # ML MODEL MANAGEMENT
124
+ TOKENIZER_MODEL_ID = "csebuetnlp/banglabert_large"
125
+ TOKENIZER = None
126
 
127
+ def get_bangla_tokenizer():
128
+ global TOKENIZER
129
+ if TOKENIZER is None:
130
  try:
131
+ TOKENIZER = AutoTokenizer.from_pretrained(TOKENIZER_MODEL_ID)
132
+ logger.info("BanglaBERT tokenizer loaded successfully.")
 
 
 
 
 
 
 
133
  except Exception as e:
134
+ logger.error(f"Failed to load BanglaBERT tokenizer: {e}")
135
+ TOKENIZER = None
136
+ return TOKENIZER
137
+ # ==============================================================================
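For context, the deleted `_load_pipeline_with_retry` retried network failures; this new loader tries once and caches the result in a module-level global, so repeated dashboard refreshes do not re-download the checkpoint. A minimal usage sketch of the function above, assuming `csebuetnlp/banglabert_large` is reachable:

```python
# Hedged usage sketch of the lazy-loaded tokenizer defined above.
tokenizer = get_bangla_tokenizer()
if tokenizer is not None:
    tokens = tokenizer.tokenize("বাংলাদেশে নির্বাচন অনুষ্ঠিত হবে")
    ids = tokenizer.encode("নির্বাচন", add_special_tokens=True)
# Subsequent calls return the cached TOKENIZER without reloading.
```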
138
 
139
+
140
+ ## Sentiment pipeline code removed for optimization
141
 
142
  # ==============================================================================
143
  # NEWS SCRAPER BACKEND
 
157
 
158
  all_articles, current_dt = [], start_dt
159
  while current_dt <= end_dt:
160
+ try:
161
+ interval_end_dt = min(current_dt + pd.Timedelta(days=interval - 1), end_dt)
162
+ start_str, end_str = current_dt.strftime('%Y-%m-%d'), interval_end_dt.strftime('%Y-%m-%d')
163
+ progress(0, desc=f"Fetching news from {start_str} to {end_str}")
164
+ site_query = f"({' OR '.join(['site:' + s.strip() for s in sites.split(',') if s.strip()])})" if sites else ""
165
+ final_query = f'"{search_keywords}" {site_query} after:{start_str} before:{end_str}'
166
+ googlenews = GoogleNews(lang='bn', region='BD')
167
+ googlenews.search(final_query)
168
+ for page in range(1, max_pages + 1):
169
+ try:
170
+ results = googlenews.results()
171
+ if not results: break
172
+ all_articles.extend(results)
173
+ if page < max_pages:
174
+ googlenews.getpage(page + 1)
175
+ time.sleep(0.5) # Reduced sleep for performance
176
+ except HTTPError as e:
177
+ if e.code == 429:
178
+ wait_time = 5 # Reduced wait for optimization
179
+ gr.Warning(f"Rate limited by Google News. Pausing for {wait_time:.0f} seconds.")
180
+ time.sleep(wait_time)
181
+ else:
182
+ logger.error(f"HTTP Error fetching news: {e}"); break
183
+ except Exception as e:
184
+ logger.error(f"An error occurred fetching news: {e}"); break
185
+ current_dt += pd.Timedelta(days=interval)
186
+ except Exception as e:
187
+ logger.error(f"Error in news scraping loop: {e}")
188
+ break
189
 
190
  if not all_articles: return pd.DataFrame(), pd.DataFrame()
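This hunk trades politeness for speed: the inter-page sleep drops from 2-5 s of jitter to a fixed 0.5 s, and the 429 pause from 15-30 s to a flat 5 s. If Google News keeps returning 429s at these rates, a common middle ground is exponential backoff with jitter; a sketch, not the committed behavior:

```python
import random
import time

def backoff_sleep(attempt: int, base: float = 5.0, cap: float = 60.0) -> None:
    # 5s, 10s, 20s, ... capped at 60s, with +/-50% jitter so retries
    # across intervals do not synchronize.
    delay = min(cap, base * (2 ** attempt)) * random.uniform(0.5, 1.5)
    time.sleep(delay)
```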
191
 
 
194
  df.dropna(subset=['published_date', 'title'], inplace=True)
195
 
196
  if filter_keys and filter_keys.strip():
197
+ # Advanced filtering logic: supports AND, OR, NOT, and phrase search
198
+ def parse_query(query):
199
+ # Simple parser for AND, OR, NOT, and phrase queries
200
+ query = query.lower()
201
+ tokens = re.findall(r'"[^"]+"|\S+', query)
202
+ expr = []
203
+ for token in tokens:
204
+ if token == 'and': expr.append('&')
205
+ elif token == 'or': expr.append('|')
206
+ elif token == 'not': expr.append('!')
207
+ else:
208
+ if token.startswith('"') and token.endswith('"'):
209
+ expr.append(f'"{token[1:-1]}"')
210
+ else:
211
+ expr.append(f'"{token}"')
212
+ return ' '.join(expr)
213
+
214
+ def match_complex_query(text, query):
215
+ # Evaluate the parsed query against the text
216
+ text = text.lower()
217
+ expr = parse_query(query)
218
+ # Replace quoted terms with their presence in text
219
+ def term_eval(term):
220
+ term = term.strip('"')
221
+ return term in text
222
+ # Replace operators with Python equivalents
223
+ expr = re.sub(r'"([^"]+)"', lambda m: str(term_eval(m.group(0))), expr)
224
+ expr = expr.replace('&', ' and ').replace('|', ' or ').replace('!', ' not ')
225
+ try:
226
+ return eval(expr)
227
+ except Exception:
228
+ return False
229
+
230
+ mask = df.apply(lambda row: match_complex_query(str(row['title']) + ' ' + str(row['desc']), filter_keys), axis=1)
231
  df = df[mask]
232
 
233
  return df, df[['published_date', 'title', 'media', 'desc', 'link']].sort_values(by='published_date', ascending=False)
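The new matcher builds a Python expression from user input and `eval`s it. Besides the injection surface, any input with more than one term and no explicit AND/OR (e.g. the comma-separated keywords the textbox label suggests) produces an expression like `True False`, which is a `SyntaxError`; the `except` swallows it and nothing matches. A sketch of an eval-free matcher, using comma-as-OR, space-as-AND, leading-`-`-as-NOT semantics (my assumed semantics, not necessarily the author's intent):

```python
def matches(text: str, query: str) -> bool:
    # Comma-separated groups are OR'ed; whitespace-separated terms within a
    # group are AND'ed; a leading '-' negates a term. No eval() involved.
    text = text.lower()
    for group in query.lower().split(','):
        terms = group.split()
        if terms and all(
            (t.lstrip('-') not in text) if t.startswith('-') else (t in text)
            for t in terms
        ):
            return True
    return False

# mask = df.apply(lambda r: matches(f"{r['title']} {r['desc']}", filter_keys), axis=1)
```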
 
276
  return comments_list
277
 
278
  def run_youtube_analysis_pipeline(api_key, query, max_videos_for_stats, num_videos_for_comments, max_comments_per_video, published_after, progress=gr.Progress()):
279
+ # Use integrated API key for seamless experience
280
+ api_key = "AIzaSyB_f3uROqZfwBWsc_sDEV63WmUHBgvGGqw"
281
  if not query: raise gr.Error("Search Keywords are required.")
282
  try:
283
  youtube = build('youtube', 'v3', developerKey=api_key)
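Overwriting the `api_key` argument with a literal string both ignores whatever the user typed into the textbox (which, in the UI hunk below, also loses `type="password"`) and publishes the credential in the repository, so it should be rotated. A hedged sketch of the usual pattern; `YOUTUBE_API_KEY` is an assumed variable name, not from the source:

```python
import os

import gradio as gr

def resolve_api_key(user_key: str) -> str:
    # Prefer the key typed in the UI, then fall back to an environment
    # variable (YOUTUBE_API_KEY is an assumed name).
    key = (user_key or "").strip() or os.environ.get("YOUTUBE_API_KEY", "")
    if not key:
        raise gr.Error("YouTube API Key is required.")
    return key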
 
349
  plt.rcParams['figure.dpi'] = 100
350
 
351
  def run_sentiment_analysis(df: pd.DataFrame, text_column: str, progress=gr.Progress()):
352
+ # Sentiment analysis removed
353
  return df
354
 
355
  def generate_scraper_dashboard(df: pd.DataFrame):
 
370
  ax.set_yticklabels(media_counts.index, fontproperties=BANGLA_FONT); ax.set_xlabel("Article Count"); plt.tight_layout()
371
 
372
  text = " ".join(title for title in df['title'].astype(str))
373
+ text = clean_bengali_text(text)
374
+ for phrase, joined in PHRASES_TO_JOIN.items():
375
+ text = text.replace(phrase, joined)
376
  fig_wc = None
377
  try:
378
+ words = re.findall(r'[\u0980-\u09FF_]{2,}', text)
379
+ words = [w for w in words if w not in COMBINED_STOPWORDS]
380
+ words = [w for w in words if len(w) > 1]
381
+ words = [w for w in words if not re.search(r'[a-zA-Z]', w)]
382
+ from collections import Counter
383
+ word_freq = Counter(words)
384
+ min_freq = 2
385
+ most_common = set([w for w, _ in word_freq.most_common(3)])
386
+ filtered_words = [w for w in words if word_freq[w] >= min_freq and w not in most_common]
387
+ wc_text = " ".join(filtered_words)
388
+ wc = WordCloud(
389
+ font_path=FONT_PATH,
390
+ width=1600,
391
+ height=900,
392
+ background_color='white',
393
+ stopwords=COMBINED_STOPWORDS,
394
+ collocations=False,
395
+ colormap='plasma',
396
+ max_words=200,
397
+ contour_width=2,
398
+ contour_color='steelblue',
399
+ regexp=r"[\u0980-\u09FF_]+"
400
+ ).generate(wc_text)
401
+ fig_wc, ax = plt.subplots(figsize=(15, 8))
402
+ ax.imshow(wc, interpolation='bilinear')
403
+ ax.axis("off")
404
+ ax.set_title("Bengali Headline Word Cloud", fontproperties=BANGLA_FONT, fontsize=22)
405
+ plt.tight_layout()
406
+ except Exception as e:
407
+ gr.Warning(f"WordCloud failed: {e}")
408
 
409
  return {
410
  kpi_total_articles: str(total_articles), kpi_unique_media: str(unique_media), kpi_date_range: date_range_str,
 
413
  }
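The pre-filtering in the hunk above does the real work before WordCloud sees the text: rare tokens (frequency below 2) and the three most dominant tokens are dropped so mid-frequency terms get visual weight. Isolated, the logic is roughly:

```python
from collections import Counter

def frequency_filter(words, min_freq=2, drop_top=3):
    # Drop rare tokens and the few most dominant ones, mirroring the
    # word-cloud preprocessing above.
    freq = Counter(words)
    dominant = {w for w, _ in freq.most_common(drop_top)}
    return [w for w in words if freq[w] >= min_freq and w not in dominant]
```

One caveat: on a small headline set this can leave `wc_text` empty, and `WordCloud.generate` raises on empty input, which is what the new `except` branch then surfaces as a warning.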
414
 
415
  def generate_sentiment_dashboard(df: pd.DataFrame):
416
+ # Sentiment dashboard removed
417
+ return {sentiment_dashboard_tab: gr.update(visible=False)}
418
 
419
  def generate_youtube_dashboard(videos_df, comments_df):
420
  set_plot_style()
421
  kpis = {
422
+ kpi_yt_videos_found: f"{len(videos_df):,}" if videos_df is not None and not videos_df.empty else "0",
423
+ kpi_yt_views_scanned: f"{videos_df['view_count'].sum():,}" if videos_df is not None and not videos_df.empty and 'view_count' in videos_df.columns else "0",
424
+ kpi_yt_comments_scraped: f"{len(comments_df):,}" if comments_df is not None and not comments_df.empty else "0"
425
  }
426
 
427
+ fig_channels, ax = None, None
428
+ if videos_df is not None and not videos_df.empty and 'channel' in videos_df.columns:
429
+ channel_counts = videos_df['channel'].value_counts().nlargest(15).sort_values()
430
+ if not channel_counts.empty:
431
+ fig_channels, ax = plt.subplots(figsize=(8, 6))
432
+ channel_counts.plot(kind='barh', ax=ax, color='coral'); ax.set_title("Top 15 Channels by Video Volume", fontproperties=BANGLA_FONT); ax.set_yticklabels(channel_counts.index, fontproperties=BANGLA_FONT); plt.tight_layout()
433
+
434
+ # Rich analytics: engagement, top videos, comment activity, time series, etc.
435
+ fig_wc, fig_top_videos, fig_engagement, fig_comment_activity, fig_time_series = None, None, None, None, None
436
  if comments_df is not None and not comments_df.empty:
437
+ # Top commented videos
438
+ fig_top_videos, ax = None, None
439
+ if 'video_title' in comments_df.columns:
440
+ top_videos = comments_df['video_title'].value_counts().nlargest(10)
441
+ if not top_videos.empty:
442
+ fig_top_videos, ax = plt.subplots(figsize=(10, 6))
443
+ top_videos.plot(kind='barh', ax=ax, color='dodgerblue')
444
+ ax.set_title("Top 10 Videos by Comment Count", fontproperties=BANGLA_FONT)
445
+ ax.set_xlabel("Comment Count")
446
+ ax.set_yticklabels(top_videos.index, fontproperties=BANGLA_FONT)
447
+ plt.tight_layout()
448
+ plt.close(fig_top_videos)
449
+
450
+ # Engagement rate per video
451
+ fig_engagement, ax = None, None
452
+ if 'video_id' in comments_df.columns and 'video_title' in comments_df.columns:
453
+ engagement_df = comments_df.groupby('video_title').size().to_frame('comment_count')
454
+ if videos_df is not None and not videos_df.empty:
455
+ merged = videos_df.set_index('video_title').join(engagement_df, lsuffix='_video', rsuffix='_comment')
456
+ # If 'comment_count' is missing, fill with 0
457
+ if 'comment_count' not in merged.columns:
458
+ merged['comment_count'] = 0
459
+ # If 'view_count' is missing, fill with 1 to avoid division by zero
460
+ if 'view_count' not in merged.columns:
461
+ merged['view_count'] = 1
462
+ merged['engagement_rate'] = merged['comment_count'] / merged['view_count']
463
+ merged = merged.sort_values('engagement_rate', ascending=False).head(10)
464
+ if not merged.empty:
465
+ fig_engagement, ax = plt.subplots(figsize=(10, 6))
466
+ merged['engagement_rate'].plot(kind='barh', ax=ax, color='mediumseagreen')
467
+ ax.set_title("Top 10 Videos by Engagement Rate", fontproperties=BANGLA_FONT)
468
+ ax.set_xlabel("Engagement Rate (Comments / Views)")
469
+ ax.set_yticklabels(merged.index, fontproperties=BANGLA_FONT)
470
+ plt.tight_layout()
471
+ plt.close(fig_engagement)
472
+
473
+ # Comment activity over time
474
+ fig_time_series, ax = None, None
475
+ if 'published_date_comment' in comments_df.columns:
476
+ try:
477
+ comments_df['published_date_comment'] = pd.to_datetime(comments_df['published_date_comment'])
478
+ time_series = comments_df.set_index('published_date_comment').resample('D').size()
479
+ if not time_series.empty:
480
+ fig_time_series, ax = plt.subplots(figsize=(10, 4))
481
+ time_series.plot(ax=ax, color='darkorange')
482
+ ax.set_title("Comment Activity Over Time", fontproperties=BANGLA_FONT)
483
+ ax.set_xlabel("Date")
484
+ ax.set_ylabel("Number of Comments")
485
+ plt.tight_layout()
486
+ plt.close(fig_time_series)
487
+ except Exception as e:
488
+ logger.error(f"Error in comment activity plot: {e}")
489
+
490
+ # Beautiful Bengali word cloud from YouTube comments
491
+ fig_wc, ax = None, None
492
+ if 'comment_text' in comments_df.columns:
493
+ text = " ".join(comment for comment in comments_df['comment_text'].astype(str))
494
+ text = clean_bengali_text(text)
495
+ for phrase, joined in PHRASES_TO_JOIN.items():
496
+ text = text.replace(phrase, joined)
497
+ try:
498
+ words = re.findall(r'[\u0980-\u09FF_]{2,}', text)
499
+ words = [w for w in words if w not in COMBINED_STOPWORDS]
500
+ words = [w for w in words if len(w) > 1]
501
+ words = [w for w in words if not re.search(r'[a-zA-Z]', w)]
502
+ from collections import Counter
503
+ word_freq = Counter(words)
504
+ min_freq = 2
505
+ most_common = set([w for w, _ in word_freq.most_common(3)])
506
+ filtered_words = [w for w in words if word_freq[w] >= min_freq and w not in most_common]
507
+ wc_text = " ".join(filtered_words)
508
+ wc = WordCloud(
509
+ font_path=FONT_PATH,
510
+ width=1600,
511
+ height=900,
512
+ background_color='white',
513
+ stopwords=COMBINED_STOPWORDS,
514
+ collocations=False,
515
+ colormap='plasma',
516
+ max_words=250,
517
+ contour_width=2,
518
+ contour_color='darkorange',
519
+ regexp=r"[\u0980-\u09FF_]+"
520
+ ).generate(wc_text)
521
+ fig_wc, ax = plt.subplots(figsize=(15, 8))
522
+ ax.imshow(wc, interpolation='bilinear')
523
+ ax.axis("off")
524
+ ax.set_title("Bengali Word Cloud from YouTube Comments", fontproperties=BANGLA_FONT, fontsize=22)
525
+ plt.tight_layout()
526
+ except Exception as e:
527
+ logger.error(f"YouTube WordCloud failed: {e}")
528
 
529
+ return {
530
+ **kpis,
531
+ yt_channel_plot: fig_channels,
532
+ yt_wordcloud_plot: fig_wc,
533
+ 'yt_top_videos_plot': fig_top_videos,
534
+ 'yt_engagement_plot': fig_engagement,
535
+ 'yt_comment_activity_plot': fig_comment_activity,
536
+ 'yt_time_series_plot': fig_time_series
537
+ }
538
 
539
  def generate_youtube_topic_dashboard(videos_df_full_scan: pd.DataFrame):
540
  if videos_df_full_scan is None or videos_df_full_scan.empty: return None, None, None
541
  set_plot_style()
542
 
543
  channel_views = videos_df_full_scan.groupby('channel')['view_count'].sum().nlargest(15).sort_values()
544
+ fig_channel_views, ax = plt.subplots(figsize=(10, 7)); channel_views.plot(kind='barh', ax=ax, color='purple'); ax.set_title("Channel Dominance by Total Views (Top 15)", fontproperties=BANGLA_FONT); ax.set_xlabel("Combined Views on Topic"); ax.set_yticklabels(channel_views.index, fontproperties=BANGLA_FONT); plt.tight_layout(); plt.close(fig_channel_views)
545
 
546
  df_sample = videos_df_full_scan.sample(n=min(len(videos_df_full_scan), 200))
547
  avg_views, avg_engagement = df_sample['view_count'].median(), df_sample['engagement_rate'].median()
548
  fig_quadrant, ax = plt.subplots(figsize=(10, 8)); sns.scatterplot(data=df_sample, x='view_count', y='engagement_rate', size='like_count', sizes=(20, 400), hue='channel', alpha=0.7, ax=ax, legend=False)
549
  ax.set_xscale('log'); ax.set_yscale('log'); ax.set_title("Content Performance Quadrant", fontproperties=BANGLA_FONT); ax.set_xlabel("Video Views (Log Scale)", fontproperties=BANGLA_FONT); ax.set_ylabel("Engagement Rate (Log Scale)", fontproperties=BANGLA_FONT)
550
+ ax.axhline(avg_engagement, ls='--', color='gray'); ax.axvline(avg_views, ls='--', color='gray'); ax.text(avg_views*1.1, ax.get_ylim()[1], 'High Performers', color='green', fontproperties=BANGLA_FONT); ax.text(ax.get_xlim()[0], avg_engagement*1.1, 'Niche Stars', color='blue', fontproperties=BANGLA_FONT); plt.close(fig_quadrant)
551
 
552
  fig_age, ax = plt.subplots(figsize=(10, 7)); sns.scatterplot(data=df_sample, x='published_date', y='view_count', size='engagement_rate', sizes=(20, 400), alpha=0.6, ax=ax)
553
+ ax.set_yscale('log'); ax.set_title("Content Age vs. Impact", fontproperties=BANGLA_FONT); ax.set_xlabel("Publication Date", fontproperties=BANGLA_FONT); ax.set_ylabel("Views (Log Scale)", fontproperties=BANGLA_FONT); plt.xticks(rotation=45); plt.close(fig_age)
 
554
  return fig_channel_views, fig_quadrant, fig_age
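The `plt.close(fig)` calls added in this hunk are safe with Gradio: closing a figure only removes it from pyplot's global figure manager (avoiding the "more than 20 figures" warning and memory growth in a long-running app), while the `Figure` object itself stays alive and renderable. The pattern in isolation:

```python
import matplotlib.pyplot as plt

def finished_figure():
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3])
    plt.close(fig)  # deregister from pyplot; fig remains renderable
    return fig
```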
555
 
556
  # ==============================================================================
 
569
  with gr.Row():
570
  with gr.Column(scale=1):
571
  gr.Markdown("### 1. Search Criteria")
572
+ search_keywords_textbox = gr.Textbox(label="Search Keywords", placeholder="e.g., ডাকসু")
573
  sites_to_search_textbox = gr.Textbox(label="Target Sites (Optional, comma-separated)", placeholder="e.g., prothomalo.com")
574
  start_date_textbox = gr.Textbox(label="Start Date", placeholder="YYYY-MM-DD or 'last week'")
575
  end_date_textbox = gr.Textbox(label="End Date", placeholder="YYYY-MM-DD or 'today'")
576
  gr.Markdown("### 2. Scraping Parameters")
577
  interval_days_slider = gr.Slider(1, 7, 3, step=1, label="Days per Interval")
578
  max_pages_slider = gr.Slider(1, 10, 5, step=1, label="Max Pages per Interval")
579
+ filter_keywords_textbox = gr.Textbox(label="Filter Keywords (comma-separated, optional)", placeholder="e.g., নির্বাচন, ভিসি")
580
  start_scraper_button = gr.Button("Start Scraping & Analysis", variant="primary")
581
  with gr.Column(scale=2):
582
  scraper_results_df = gr.DataFrame(label="Filtered Results", interactive=False, wrap=True)
 
602
  with gr.TabItem("3. YouTube Topic Analysis", id=2):
603
  with gr.Row():
604
  with gr.Column(scale=1):
605
+ gr.Markdown("### YouTube Search & Analysis")
606
+ yt_api_key = gr.Textbox(label="YouTube API Key", placeholder="Paste your YouTube Data API v3 key here")
607
+ yt_search_keywords = gr.Textbox(label="Search Keywords", placeholder="e.g., বাংলাদেশ, নির্বাচন")
608
  yt_published_after = gr.Textbox(label="Published After Date (Optional)", placeholder="YYYY-MM-DD or '1 month ago'")
609
+ gr.Markdown("### Analysis Parameters")
610
  yt_max_videos_for_stats = gr.Slider(label="Videos to Scan for Topic Stats (Broad Scan)", minimum=50, maximum=750, value=300, step=50)
611
  yt_num_videos_for_comments = gr.Slider(label="Top Videos for Comment Analysis (Deep Dive)", minimum=5, maximum=100, value=25, step=5)
612
  yt_max_comments = gr.Slider(10, 100, 30, step=10, label="Max Comments per Video")
613
  start_yt_analysis_button = gr.Button("Start YouTube Analysis", variant="primary")
614
  with gr.Column(scale=2):
615
  with gr.Group(visible=False) as yt_dashboard_group:
616
+ gr.Markdown("### YouTube Topic Analytics Dashboard")
617
  with gr.Row():
618
  kpi_yt_total_topic_videos = gr.Textbox(label="Est. Total Videos on Topic (YT)", interactive=False)
619
  kpi_yt_videos_found = gr.Textbox(label="Videos Scanned for Stats", interactive=False)
620
  kpi_yt_views_scanned = gr.Textbox(label="Combined Views (of Scanned)", interactive=False)
621
  kpi_yt_comments_scraped = gr.Textbox(label="Comments Analyzed (from Top Videos)", interactive=False)
622
  with gr.Tabs():
623
+ with gr.TabItem("Top Videos & Engagement"):
624
  yt_videos_df_output = gr.DataFrame(label="Top Videos Analyzed for Comments (sorted by views)")
625
+ yt_top_videos_plot = gr.Plot(label="Top 10 Videos by Comment Count")
626
+ yt_engagement_plot = gr.Plot(label="Top 10 Videos by Engagement Rate")
627
+ with gr.TabItem("Comment Activity & Word Cloud"):
628
+ yt_comment_activity_plot = gr.Plot(label="Comment Activity Over Time")
629
+ yt_wordcloud_plot = gr.Plot(label="Bengali Word Cloud from Comments")
630
+ with gr.TabItem("Channel & Topic Analytics"):
631
+ yt_channel_plot = gr.Plot(label="Channel Contribution by Video Count")
632
  yt_channel_views_plot = gr.Plot(label="Channel Dominance by Views")
633
  yt_performance_quadrant_plot = gr.Plot(label="Content Performance Quadrant")
634
  yt_content_age_plot = gr.Plot(label="Content Age vs. Impact")
 
661
 
662
  def update_news_dashboards(analyzed_df):
663
  if analyzed_df is None or analyzed_df.empty:
664
+ return [gr.update(visible=False), '', '', '', None, None, None, gr.update(visible=False), None, None]
 
665
  scraper_updates = generate_scraper_dashboard(analyzed_df)
666
  sentiment_updates = generate_sentiment_dashboard(analyzed_df)
667
+ # Return outputs in the exact order of news_ui_components
668
+ return [
669
+ scraper_updates.get(scraper_dashboard_group, gr.update(visible=False)),
670
+ scraper_updates.get(kpi_total_articles, ''),
671
+ scraper_updates.get(kpi_unique_media, ''),
672
+ scraper_updates.get(kpi_date_range, ''),
673
+ scraper_updates.get(dashboard_timeline_plot, None),
674
+ scraper_updates.get(dashboard_media_plot, None),
675
+ scraper_updates.get(dashboard_wordcloud_plot, None),
676
+ sentiment_updates.get(sentiment_dashboard_tab, gr.update(visible=False)),
677
+ sentiment_updates.get(sentiment_pie_plot, None),
678
+ sentiment_updates.get(sentiment_by_media_plot, None)
679
+ ]
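Converting the dict return to a positional list means the order must stay in lockstep with `news_ui_components` below. Gradio also accepts dict outputs keyed by output components, which is what the deleted code relied on and which cannot drift out of order; the equivalent dict form, for comparison:

```python
# Equivalent dict-style return; Gradio matches keys to output components.
return {**scraper_updates, **sentiment_updates}
```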
680
 
681
  news_ui_components = [
682
  scraper_dashboard_group, kpi_total_articles, kpi_unique_media, kpi_date_range,
 
710
 
711
  def update_youtube_dashboards(results_data):
712
  if not results_data or results_data.get("full_scan") is None or results_data["full_scan"].empty:
713
+ return [gr.update(visible=False), "0", "0", "0", "0", None, None, None, None, None, None, None]
 
 
 
 
 
 
 
714
  videos_df_full, comments_df, total_estimate = results_data.get("full_scan"), results_data.get("comments"), results_data.get("total_estimate", 0)
715
  deep_dive_updates = generate_youtube_dashboard(videos_df_full, comments_df)
716
  fig_ch_views, fig_quad, fig_age = generate_youtube_topic_dashboard(videos_df_full)
717
+ # Return outputs in the exact order of yt_ui_components
718
+ return [
719
+ gr.update(visible=True),
720
+ f"{total_estimate:,}",
721
+ deep_dive_updates.get(kpi_yt_videos_found, "0"),
722
+ deep_dive_updates.get(kpi_yt_views_scanned, "0"),
723
+ deep_dive_updates.get(kpi_yt_comments_scraped, "0"),
724
+ deep_dive_updates.get(yt_channel_plot, None),
725
+ deep_dive_updates.get(yt_wordcloud_plot, None),
726
+ deep_dive_updates.get(yt_sentiment_pie_plot, None),
727
+ deep_dive_updates.get(yt_sentiment_by_video_plot, None),
728
+ fig_ch_views,
729
+ fig_quad,
730
+ fig_age
731
+ ]
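This ordered return still looks up `yt_sentiment_pie_plot` and `yt_sentiment_by_video_plot`, which this commit removes from the layout, so the call would presumably raise `NameError` at runtime, and the new plots (`yt_top_videos_plot`, `yt_engagement_plot`, `yt_comment_activity_plot`) are never returned at all. A sketch of the likely intended wiring, assuming the figure dict is keyed by components (see the note on `generate_youtube_dashboard` above); the tail of `yt_ui_components` is not visible in this diff, so the exact order is an assumption:

```python
# Presumed wiring against the components the new UI actually defines.
return [
    gr.update(visible=True),
    f"{total_estimate:,}",
    deep_dive_updates.get(kpi_yt_videos_found, "0"),
    deep_dive_updates.get(kpi_yt_views_scanned, "0"),
    deep_dive_updates.get(kpi_yt_comments_scraped, "0"),
    deep_dive_updates.get(yt_channel_plot),
    deep_dive_updates.get(yt_wordcloud_plot),
    deep_dive_updates.get(yt_top_videos_plot),
    deep_dive_updates.get(yt_engagement_plot),
    deep_dive_updates.get(yt_comment_activity_plot),
    fig_ch_views,
    fig_quad,
    fig_age,
]
```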
732
 
733
  yt_ui_components = [
734
  yt_dashboard_group, kpi_yt_total_topic_videos, kpi_yt_videos_found, kpi_yt_views_scanned, kpi_yt_comments_scraped,
 
750
  logger.info("Using authentication credentials from environment variable.")
751
  else:
752
  logger.warning("No AUTH_CREDENTIALS found. Using default insecure credentials. Set this as an environment variable for production.")
753
+ auth_tuple = ("Arjon", "12345")
754
 
755
  app.launch(debug=True, auth=auth_tuple)
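For completeness, the environment-variable branch referenced in the log message above is not shown in this diff; a minimal sketch of one common shape, assuming an `AUTH_CREDENTIALS` value of the form `username:password`:

```python
import os

raw = os.environ.get("AUTH_CREDENTIALS", "")  # assumed "username:password" format
if ":" in raw:
    user, _, pwd = raw.partition(":")
    auth_tuple = (user, pwd)
else:
    auth_tuple = ("Arjon", "12345")  # insecure fallback, as in the code above
```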