Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -3,24 +3,26 @@
|
|
| 3 |
import os
|
| 4 |
import gradio as gr
|
| 5 |
import pandas as pd
|
| 6 |
-
|
|
|
|
|
|
|
| 7 |
from feature_engineering import IntegratedTheoryFeatures
|
| 8 |
|
| 9 |
-
#
|
|
|
|
|
|
|
|
|
|
| 10 |
_cached_df = None
|
| 11 |
_cached_dates = (None, None)
|
| 12 |
|
| 13 |
|
| 14 |
-
|
| 15 |
-
|
| 16 |
def get_data(start_date: str, end_date: str):
|
| 17 |
-
"""Download or return cached market data."""
|
| 18 |
global _cached_df, _cached_dates
|
| 19 |
if _cached_df is not None and _cached_dates == (start_date, end_date):
|
| 20 |
return _cached_df.copy()
|
| 21 |
|
| 22 |
print(f"📥 Downloading data from {start_date} to {end_date}...")
|
| 23 |
-
downloader = UnifiedMarketDataDownloader(FRED_API_KEY)
|
| 24 |
df = downloader.download_all_data(start_date=start_date, end_date=end_date)
|
| 25 |
|
| 26 |
_cached_df = df.copy()
|
|
@@ -28,38 +30,104 @@ def get_data(start_date: str, end_date: str):
|
|
| 28 |
return df
|
| 29 |
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
def run_pipeline(days_back: int = 1825):
|
| 32 |
try:
|
| 33 |
-
# Define date range
|
| 34 |
today = pd.Timestamp.today()
|
| 35 |
start_date = (today - pd.Timedelta(days=days_back)).strftime('%Y-%m-%d')
|
| 36 |
end_date = today.strftime('%Y-%m-%d')
|
| 37 |
|
| 38 |
-
# Get data (cached if possible)
|
| 39 |
df = get_data(start_date, end_date)
|
| 40 |
-
|
| 41 |
-
# Validate data
|
| 42 |
if len(df) < 300:
|
| 43 |
-
return {"Error": "Insufficient data. Try increasing lookback window."}
|
| 44 |
|
| 45 |
-
# Run feature pipeline
|
| 46 |
engine = IntegratedTheoryFeatures(df)
|
| 47 |
features = engine.build_all_features()
|
| 48 |
-
|
| 49 |
-
# Extract latest non-NaN row
|
| 50 |
latest = features.dropna(subset=['regime']).iloc[-1]
|
| 51 |
|
|
|
|
| 52 |
def fmt(x):
|
| 53 |
-
if pd.isna(x):
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
elif isinstance(x, float) and 0 <= x <= 1:
|
| 58 |
-
return f"{x:.1%}"
|
| 59 |
-
else:
|
| 60 |
-
return str(x)
|
| 61 |
|
| 62 |
-
|
| 63 |
"Regime": str(latest["regime"]),
|
| 64 |
"Dalio Composite": fmt(latest['dalio_composite_norm']),
|
| 65 |
"Stevenson Inequality": fmt(latest['stevenson_inequality_norm']),
|
|
@@ -69,10 +137,17 @@ def run_pipeline(days_back: int = 1825):
|
|
| 69 |
"Prob Stagflation": fmt(latest['prob_stagflation']),
|
| 70 |
"Prob Tech Boom": fmt(latest['prob_tech_boom']),
|
| 71 |
}
|
| 72 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
|
| 74 |
except Exception as e:
|
| 75 |
-
|
|
|
|
| 76 |
|
| 77 |
|
| 78 |
# Gradio UI
|
|
@@ -84,9 +159,18 @@ with gr.Blocks(title="🌍 Integrated Market Theory Dashboard") as demo:
|
|
| 84 |
days = gr.Slider(365, 2500, value=1825, step=90, label="Lookback Window (days)")
|
| 85 |
run_btn = gr.Button("🔄 Update Analysis", variant="primary")
|
| 86 |
|
| 87 |
-
|
|
|
|
|
|
|
| 88 |
|
| 89 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 90 |
|
| 91 |
-
# Launch (HF Spaces uses this)
|
| 92 |
demo.launch()
|
|
|
|
| 3 |
import os
|
| 4 |
import gradio as gr
|
| 5 |
import pandas as pd
|
| 6 |
+
import plotly.graph_objects as go
|
| 7 |
+
import plotly.express as px
|
| 8 |
+
from geo_macro import UnifiedMarketDataDownloader
|
| 9 |
from feature_engineering import IntegratedTheoryFeatures
|
| 10 |
|
| 11 |
+
# Get FRED API key from environment (set in HF Spaces secrets)
|
| 12 |
+
FRED_API_KEY = os.getenv("FRED_API_KEY", "")
|
| 13 |
+
|
| 14 |
+
# Cache to avoid re-downloading data
|
| 15 |
_cached_df = None
|
| 16 |
_cached_dates = (None, None)
|
| 17 |
|
| 18 |
|
|
|
|
|
|
|
| 19 |
def get_data(start_date: str, end_date: str):
|
|
|
|
| 20 |
global _cached_df, _cached_dates
|
| 21 |
if _cached_df is not None and _cached_dates == (start_date, end_date):
|
| 22 |
return _cached_df.copy()
|
| 23 |
|
| 24 |
print(f"📥 Downloading data from {start_date} to {end_date}...")
|
| 25 |
+
downloader = UnifiedMarketDataDownloader(fred_api_key=FRED_API_KEY)
|
| 26 |
df = downloader.download_all_data(start_date=start_date, end_date=end_date)
|
| 27 |
|
| 28 |
_cached_df = df.copy()
|
|
|
|
| 30 |
return df
|
| 31 |
|
| 32 |
|
| 33 |
+
def create_composite_bar(latest):
    """Bar chart of the 4 core normalized scores"""
    # (label, feature-column) pairs, pulled from the latest feature row.
    pairs = [
        ("Dalio Composite", "dalio_composite_norm"),
        ("Stevenson Inequality", "stevenson_inequality_norm"),
        ("Thiel Monopoly", "thiel_monopoly_norm"),
        ("Gundlach Reckoning", "gundlach_reckoning_norm"),
    ]
    names = [label for label, _ in pairs]
    values = [latest[col] for _, col in pairs]
    bar = go.Bar(
        x=names,
        y=values,
        marker_color=["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728"],
        text=[f"{v:.3f}" for v in values],
        textposition='auto',
    )
    fig = go.Figure(bar)
    # Scores are normalized; pin the axis to [-1, 1] so charts compare run-to-run.
    fig.update_layout(
        title="Core Theory Scores (Normalized)",
        yaxis_range=[-1, 1],
        height=350,
    )
    return fig
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def create_probabilities_bar(latest):
    """Horizontal bar chart of scenario probabilities"""
    names = ["Credit Collapse", "Stagflation", "Tech Boom"]
    columns = ['prob_credit_collapse', 'prob_stagflation', 'prob_tech_boom']
    values = [latest[c] for c in columns]
    fig = go.Figure(go.Bar(
        x=values,
        y=names,
        orientation='h',
        marker_color=["#d62728", "#ff7f0e", "#2ca02c"],
        text=[f"{v:.1%}" for v in values],
        textposition='auto',
    ))
    fig.update_layout(
        title="Scenario Probabilities",
        xaxis_tickformat='.0%',  # axis ticks rendered as whole percents
        xaxis_range=[0, 1],      # probabilities are bounded in [0, 1]
        height=250,
    )
    return fig
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def create_regime_timeline(features):
    """Scatter timeline of the market regime over the last 252 rows (~12 months).

    Args:
        features: DataFrame with a 'regime' column; the index is used as the
            date axis (assumed datetime-like — TODO confirm against the
            feature pipeline).

    Returns:
        A plotly Figure with one colored marker per row, colored by regime.
    """
    tail = features[['regime']].tail(252).copy()
    tail['date'] = tail.index
    color_map = {
        'CRISIS': '#d62728',
        'INEQUALITY_TRAP': '#ff7f0e',
        'GEOPOLITICAL_SHOCK': '#9467bd',
        'TECH_MONOPOLY': '#2ca02c',
        'TRANSITION': '#7f7f7f'
    }
    # Fix: regime labels missing from color_map previously mapped to NaN,
    # which plotly renders unpredictably; fall back to a neutral gray.
    tail['color'] = tail['regime'].map(color_map).fillna('#bbbbbb')
    fig = go.Figure(go.Scatter(
        x=tail['date'],
        y=tail['regime'],
        mode='markers',
        marker=dict(color=tail['color'], size=8),
        showlegend=False
    ))
    fig.update_layout(
        title="Regime Timeline (Last 12 Months)",
        height=200,
        yaxis_title="Regime",
        xaxis_title="Date"
    )
    return fig
|
| 107 |
+
|
| 108 |
+
|
| 109 |
def run_pipeline(days_back: int = 1825):
|
| 110 |
try:
|
|
|
|
| 111 |
today = pd.Timestamp.today()
|
| 112 |
start_date = (today - pd.Timedelta(days=days_back)).strftime('%Y-%m-%d')
|
| 113 |
end_date = today.strftime('%Y-%m-%d')
|
| 114 |
|
|
|
|
| 115 |
df = get_data(start_date, end_date)
|
|
|
|
|
|
|
| 116 |
if len(df) < 300:
|
| 117 |
+
return {"Error": "Insufficient data. Try increasing lookback window."}, None, None, None
|
| 118 |
|
|
|
|
| 119 |
engine = IntegratedTheoryFeatures(df)
|
| 120 |
features = engine.build_all_features()
|
|
|
|
|
|
|
| 121 |
latest = features.dropna(subset=['regime']).iloc[-1]
|
| 122 |
|
| 123 |
+
# Format JSON
|
| 124 |
def fmt(x):
|
| 125 |
+
if pd.isna(x): return "N/A"
|
| 126 |
+
if isinstance(x, float) and -1 <= x <= 1: return f"{x:.3f}"
|
| 127 |
+
elif isinstance(x, float) and 0 <= x <= 1: return f"{x:.1%}"
|
| 128 |
+
else: return str(x)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
|
| 130 |
+
json_output = {
|
| 131 |
"Regime": str(latest["regime"]),
|
| 132 |
"Dalio Composite": fmt(latest['dalio_composite_norm']),
|
| 133 |
"Stevenson Inequality": fmt(latest['stevenson_inequality_norm']),
|
|
|
|
| 137 |
"Prob Stagflation": fmt(latest['prob_stagflation']),
|
| 138 |
"Prob Tech Boom": fmt(latest['prob_tech_boom']),
|
| 139 |
}
|
| 140 |
+
|
| 141 |
+
# Create plots
|
| 142 |
+
composite_fig = create_composite_bar(latest)
|
| 143 |
+
prob_fig = create_probabilities_bar(latest)
|
| 144 |
+
timeline_fig = create_regime_timeline(features)
|
| 145 |
+
|
| 146 |
+
return json_output, composite_fig, prob_fig, timeline_fig
|
| 147 |
|
| 148 |
except Exception as e:
|
| 149 |
+
error = {"Error": str(e)}
|
| 150 |
+
return error, None, None, None
|
| 151 |
|
| 152 |
|
| 153 |
# Gradio UI
|
|
|
|
| 159 |
days = gr.Slider(365, 2500, value=1825, step=90, label="Lookback Window (days)")
|
| 160 |
run_btn = gr.Button("🔄 Update Analysis", variant="primary")
|
| 161 |
|
| 162 |
+
with gr.Row():
|
| 163 |
+
json_output = gr.JSON(label="Current State")
|
| 164 |
+
composite_plot = gr.Plot(label="Core Theory Scores")
|
| 165 |
|
| 166 |
+
with gr.Row():
|
| 167 |
+
prob_plot = gr.Plot(label="Scenario Probabilities")
|
| 168 |
+
timeline_plot = gr.Plot(label="Regime Timeline")
|
| 169 |
+
|
| 170 |
+
run_btn.click(
|
| 171 |
+
run_pipeline,
|
| 172 |
+
inputs=days,
|
| 173 |
+
outputs=[json_output, composite_plot, prob_plot, timeline_plot]
|
| 174 |
+
)
|
| 175 |
|
|
|
|
| 176 |
demo.launch()
|