Dmitry Beresnev committed on
Commit ·
20fa678
1
Parent(s): 97c4ac7
fix news dashboard
Browse files- app/pages/05_Dashboard.py +72 -81
app/pages/05_Dashboard.py
CHANGED
|
@@ -450,88 +450,79 @@ def fetch_economic_calendar():
|
|
| 450 |
status_placeholder = st.empty()
|
| 451 |
|
| 452 |
# Execute all news fetching operations in parallel using ThreadPoolExecutor
|
| 453 |
-
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
executor.submit(fetch_market_events): 'market_events',
|
| 464 |
-
executor.submit(fetch_economic_calendar): 'economic_calendar'
|
| 465 |
-
}
|
| 466 |
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
|
| 470 |
-
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
|
| 477 |
-
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
|
| 481 |
-
|
| 482 |
-
|
| 483 |
-
|
| 484 |
-
|
| 485 |
-
|
| 486 |
-
|
| 487 |
-
|
| 488 |
-
|
| 489 |
-
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
| 495 |
-
|
| 496 |
-
|
| 497 |
-
|
| 498 |
-
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
|
| 521 |
-
|
| 522 |
-
|
| 523 |
-
|
| 524 |
-
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
|
| 529 |
-
# Mark incomplete sources
|
| 530 |
-
all_sources = set(futures_map.values())
|
| 531 |
-
incomplete_sources = all_sources - set(completed_sources)
|
| 532 |
-
for source in incomplete_sources:
|
| 533 |
-
fetch_errors.append(f"{source} timed out - skipped")
|
| 534 |
-
completed_sources.append(f"{source} (timeout)")
|
| 535 |
|
| 536 |
# Clear the status message after all sources complete
|
| 537 |
status_placeholder.success(f"✅ Loaded {len(completed_sources)}/8 sources successfully")
|
|
|
|
| 450 |
status_placeholder = st.empty()
|
| 451 |
|
| 452 |
# Execute all news fetching operations in parallel using ThreadPoolExecutor
|
| 453 |
+
# One (fetcher callable, source label) pair per dashboard news feed.
# The labels are the keys `_apply_result` dispatches on, so they must
# stay in sync with that function.
_fetch_tasks = [
    (fetch_twitter_news,      'twitter'),
    (fetch_reddit_news,       'reddit'),
    (fetch_rss_news,          'rss'),
    (fetch_ai_tech_news,      'ai_tech'),
    (fetch_prediction_markets, 'predictions'),
    (fetch_sectoral_news,     'sectoral_news'),
    (fetch_market_events,     'market_events'),
    (fetch_economic_calendar, 'economic_calendar'),
]
|
|
|
|
|
|
|
|
|
|
| 463 |
|
| 464 |
+
def _apply_result(source_name, result_df, error):
    """Store one fetcher's result into the matching module-level dataframe.

    ``source_name`` is a label from ``_fetch_tasks``, ``result_df`` the
    dataframe the fetcher returned, and ``error`` an optional error string
    that is appended to the module-level ``fetch_errors`` list.
    """
    global rss_all_df, rss_main_df

    # Every source except 'rss' maps straight onto a single module-level
    # dataframe; dispatch by name instead of a long if/elif chain.
    plain_targets = {
        'twitter': 'twitter_df',
        'reddit': 'reddit_df',
        'ai_tech': 'ai_tech_df',
        'predictions': 'predictions_df',
        'sectoral_news': 'sectoral_news_df',
        'market_events': 'market_events_df',
        'economic_calendar': 'economic_calendar_df',
    }

    if source_name == 'rss':
        # RSS additionally feeds a derived frame: only the rows flagged
        # as coming from the web.
        rss_all_df = result_df
        if not rss_all_df.empty and 'from_web' in rss_all_df.columns:
            rss_main_df = rss_all_df[rss_all_df['from_web'] == True].copy()
    elif source_name in plain_targets:
        globals()[plain_targets[source_name]] = result_df

    if error:
        fetch_errors.append(error)
| 487 |
+
|
| 488 |
+
# Accumulators shared with _apply_result: textual error messages for the
# UI, and the list of source labels that have finished (successfully or not).
fetch_errors = []
completed_sources = []

with st.spinner("Loading news from 8 sources..."):
    try:
        with ThreadPoolExecutor(max_workers=4) as executor:
            # Fan out all eight fetchers; map each future back to its label.
            futures_map = {executor.submit(fn): name for fn, name in _fetch_tasks}
            try:
                # Consume results as they finish so one slow source does not
                # delay display of the others. The 90s budget covers the
                # whole batch, not each source individually.
                for future in as_completed(futures_map, timeout=90):
                    source_name = futures_map[future]
                    try:
                        result_df, error = future.result()
                        completed_sources.append(source_name)
                        status_placeholder.info(f"🔍 Loaded {len(completed_sources)}/8 sources ({', '.join(completed_sources)})")
                        _apply_result(source_name, result_df, error)
                    except Exception as e:
                        # One failed source must not abort the whole load.
                        fetch_errors.append(f"Error fetching {source_name} news: {e}")
                        completed_sources.append(f"{source_name} (error)")
                        status_placeholder.warning(f"⚠️ {source_name} failed, continuing with other sources...")
            except TimeoutError:
                fetch_errors.append("⏱️ Some sources timed out after 90 seconds - displaying available results")
                status_placeholder.warning(f"⚠️ {len(completed_sources)}/8 sources loaded (some timed out)")
                # BUGFIX: cancel every future that has not started yet.
                # Without this, leaving the `with ThreadPoolExecutor` block
                # calls shutdown(wait=True), which blocks until all queued
                # tasks run — silently defeating the 90-second budget.
                # (Futures already running cannot be cancelled and are still
                # waited on; cancel() is a no-op for them.)
                for pending in futures_map:
                    pending.cancel()
                all_sources = set(futures_map.values())
                for source in all_sources - set(completed_sources):
                    fetch_errors.append(f"{source} timed out - skipped")
                    completed_sources.append(f"{source} (timeout)")
    except RuntimeError:
        # OS thread limit reached — fall back to sequential fetching
        fetch_errors.append("⚠️ Thread limit reached, falling back to sequential fetch")
        for fn, name in _fetch_tasks:
            try:
                result_df, error = fn()
                completed_sources.append(name)
                status_placeholder.info(f"🔍 Loaded {len(completed_sources)}/8 sources (sequential mode)")
                _apply_result(name, result_df, error)
            except Exception as e:
                fetch_errors.append(f"Error fetching {name} news: {e}")
                completed_sources.append(f"{name} (error)")

# Clear the status message after all sources complete
status_placeholder.success(f"✅ Loaded {len(completed_sources)}/8 sources successfully")
|