Update scripts/app.py

scripts/app.py  CHANGED  (+504 −225)
Before (the removed side of the diff; several removed lines are cut off in the source and are left truncated rather than guessed at, with unrecoverable runs collapsed into placeholder lines):

```diff
@@ -10,232 +10,395 @@ import os
 import sys
 import json
 import torch
-import
-import
-
-# --- Fix YFinance Cache Lock ---
-try:
-    cache_dir = "/tmp/pytz_cache"
-    if os.path.exists(cache_dir): shutil.rmtree(cache_dir)
-    os.makedirs(cache_dir, exist_ok=True)
-    yf.set_tz_cache_location(cache_dir)
-except: pass
-
-# --- Add project root to sys.path ---
-try:
-    script_dir = os.path.dirname(os.path.abspath(__file__))
-    project_root = os.path.dirname(script_dir)
-    if project_root not in sys.path: sys.path.insert(0, project_root)
-except NameError:
-    project_root = os.getcwd()
-    if project_root not in sys.path: sys.path.insert(0, project_root)
-
-print(f"Project Root set to: {project_root}")
-
-# --- Imports ---
-from scripts.fetch_market_data import fetch_market_data, ASSETS, FRED_IDS
-from scripts.llm_analysis_rag import analyze_agent_decision, analyze_historical_segment, setup_rag_chain, query_rag_chain
 from stable_baselines3 import SAC
-from
-from
 
 # --- Configuration ---
-MODEL_PATH = os.path.join(
 WINDOW_SIZE = 30
 MACRO_COLS = list(FRED_IDS.values())
-DASHBOARD_DATA_PATH = os.path.join(
 TRAIN_START_DATE = "2015-01-01"
-TRAIN_END_DATE = "
 DASHBOARD_DATA_DF = None
 
 TIME_PERIODS = {
-    "6 Months": 180,
-    "
 }
 
-#
 def initialize_dashboard_data():
     global DASHBOARD_DATA_DF
-    print("--- Initializing Data ---")
     end_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
     start_date = (datetime.now() - timedelta(days=365*6)).strftime('%Y-%m-%d')
-
-
     fetch_market_data(start_date, end_date, DASHBOARD_DATA_PATH)
 
     if os.path.exists(DASHBOARD_DATA_PATH):
         DASHBOARD_DATA_DF = pd.read_csv(DASHBOARD_DATA_PATH, index_col=0, parse_dates=True)
-
         asset_cols = [c for c in ASSETS if c in DASHBOARD_DATA_DF.columns]
         if asset_cols:
             DASHBOARD_DATA_DF['Daily_Ret_Eq'] = DASHBOARD_DATA_DF[asset_cols].pct_change().mean(axis=1)
-        print(f"Data loaded. Shape: {DASHBOARD_DATA_DF.shape}")
     else:
-        print("❌ Failed to initialize data.")
 
 try:
-    setup_rag_chain()
     initialize_dashboard_data()
 except Exception as e:
-    print(f"
 
-# --- Helper Functions ---
 def evaluate_agent_pro(env, model):
     obs, info = env.reset()
     terminated, truncated = False, False
-    portfolio_values = [env.
     while not (terminated or truncated):
         action, _states = model.predict(obs, deterministic=True)
         obs, reward, terminated, truncated, info = env.step(action)
         portfolio_values.append(info['portfolio_value'])
     valid_dates = env.df.index[env.window_size-1:]
     return pd.Series(portfolio_values, index=valid_dates[:len(portfolio_values)])
 
 def calculate_metrics_pro(portfolio_values, freq=252, rf=0.0):
-
     returns = portfolio_values.pct_change().dropna()
-    if returns.empty:
 
     total_return = (portfolio_values.iloc[-1] / portfolio_values.iloc[0]) - 1
     num_years = (len(portfolio_values) - 1) / freq
     cagr = (portfolio_values.iloc[-1] / portfolio_values.iloc[0]) ** (1/num_years) - 1 if num_years > 0 else 0.0
-
     downside_returns = returns[returns < 0]
-
     volatility = returns.std() * np.sqrt(freq)
-    [… removed lines truncated in the source …]
 
     return {
-        "Total Return": total_return,
-        "
         "Calmar Ratio": calmar_ratio
     }
 
 def calculate_feature_importance(model, obs):
     obs_tensor = torch.as_tensor(obs, dtype=torch.float32, device=model.device)
-    if obs_tensor.dim() == 1: obs_tensor = obs_tensor.unsqueeze(0)
     obs_tensor.requires_grad_()
-
     actor = model.policy.actor
     baseline = torch.zeros_like(obs_tensor)
     steps = 50
     scaled_inputs = [baseline + (float(i) / steps) * (obs_tensor - baseline) for i in range(steps + 1)]
-
     grads = []
     for scaled_input in scaled_inputs:
         action_mean = actor(scaled_input)
         target_output = action_mean.sum()
         grad = torch.autograd.grad(outputs=target_output, inputs=scaled_input)[0]
         grads.append(grad)
 
-    #
-
-    avg_grads = (
-    avg_grads = avg_grads.mean(dim=0)
-    # -----------------------------------------------------------
 
     integrated_grads = (obs_tensor - baseline) * avg_grads
     importance_scores = integrated_grads.detach().cpu().numpy().flatten()
-
     feature_names = []
     for i in range(WINDOW_SIZE):
-        for asset in ASSETS:
     for i in range(WINDOW_SIZE):
-        for macro in MACRO_COLS:
 
     feature_importance_dict = dict(zip(feature_names, importance_scores))
     aggregated_importance = {}
     for base_feature in ASSETS + MACRO_COLS:
         total_imp = sum(abs(val) for key, val in feature_importance_dict.items() if key.startswith(base_feature))
         aggregated_importance[base_feature] = total_imp
 
     top_features = dict(sorted(aggregated_importance.items(), key=lambda item: item[1], reverse=True)[:8])
 
-    [… removed plotting lines truncated in the source …]
     return fig
 
-#
 def run_historical_simulation(start_date_str, end_date_str):
-
-
     try:
-
         if start_date < DASHBOARD_DATA_DF.index.min() or end_date > DASHBOARD_DATA_DF.index.max():
-
-
         df_slice = DASHBOARD_DATA_DF.loc[start_date:end_date].copy()
         asset_cols_only = [c for c in ASSETS if c in df_slice.columns]
-    [… removed lines truncated in the source …]
         model = SAC.load(MODEL_PATH)
-        rl_vals = evaluate_agent_pro(env, model)
 
-    [… removed lines truncated in the source …]
 
         fig = go.Figure()
-        fig.add_trace(go.Scatter(x=
-        fig.add_trace(go.Scatter(x=
-        fig.add_trace(go.Scatter(x=
-    [… removed lines truncated in the source …]
         }
-
     except Exception as e:
-
 
-def run_hist_analysis(selected_assets, period):
-    if DASHBOARD_DATA_DF is None: return go.Figure(), "No Data"
-    yield go.Figure(), "Analyzing..."
     try:
-    [… removed lines truncated in the source …]
     except Exception as e:
-
 
-def
     print("Fetching prediction data...")
-    [… removed lines truncated in the source …]
-    fetch_market_data(
-    if not os.path.exists(
-    df = pd.read_csv(
     df.dropna(inplace=True)
-    if len(df) < window_size: raise Exception("Not enough data.")
     return df.iloc[-window_size:].copy()
 
 def prepare_observation(data_window):
@@ -244,141 +407,257 @@ def prepare_observation(data_window):
     norm_prices = price_data / (price_data[0] + 1e-8)
     norm_macro = macro_data / (macro_data[0] + 1e-8)
     obs = np.concatenate([norm_prices, norm_macro], axis=1)
-    # Return flattened obs for
-    # (Note: XAI calc func re-shapes it internally, but expects a tensor)
     return obs.flatten().astype(np.float32), obs.astype(np.float32), data_window
 
 def predict_and_analyze():
-
     try:
-        data_window =
         flat_obs, raw_obs, df_window_for_analyst = prepare_observation(data_window)
-
-        if not os.path.exists(MODEL_PATH): raise FileNotFoundError("Model not found
         model = SAC.load(MODEL_PATH)
-
-        # --- FIX 2: Pass the FLATTENED observation to XAI function ---
-        # The XAI function logic expects an input that matches the model's input layer.
-        yield "XAI Calc...", None, go.Figure(), "Calculating XAI..."
-        xai_plot = calculate_feature_importance(model, flat_obs)
 
         action, _ = model.predict(flat_obs, deterministic=True)
-
-        weights =
-    [… removed lines truncated in the source …]
-        alloc_df =
-    [… removed lines truncated in the source …]
     else:
-
-        yield
     except Exception as e:
         import traceback
         traceback.print_exc()
-
-    #
-    [… removed lines truncated in the source …]
-    dates =
-    [… removed lines truncated in the source …]
     return fig
 
-def
-    [… removed lines truncated in the source …]
     return fig
 
-def
-
 
-# --- UI ---
 custom_css = """
 .metric-box { background-color: #1f2937; padding: 20px; border-radius: 12px; border: 1px solid #374151; text-align: center; }
-.metric-label { font-size: 1.1em; color: #9ca3af; }
 .metric-value { font-size: 2.2em; font-weight: 700; color: #e5e7eb; }
 """
 
-#
-
-
 gr.Markdown("# 🧠 Deep RL & LLM Portfolio Manager")
-
 with gr.Tabs():
-
-
     with gr.Row():
-
-        gr.
     with gr.Row():
-        with gr.Column(scale=3):
-
     with gr.Row():
-        with gr.Column():
 
-    with gr.TabItem("🔮 Forecast"):
-        btn = gr.Button("🚀 Run Analysis", variant="primary")
-        stat = gr.Textbox(label="Status", interactive=False)
         with gr.Row():
             with gr.Column(scale=2):
-
-
             with gr.Column(scale=3):
-
-
 
-    with gr.TabItem("📅 Analyst"):
         with gr.Row():
             with gr.Column(scale=1):
-    [… removed lines truncated in the source …]
             with gr.Column(scale=3):
-
-
 
-    with gr.TabItem("📈 Simulation"):
         with gr.Row():
-    [… removed lines truncated in the source …]
 
 if __name__ == "__main__":
     demo.queue().launch(server_name="0.0.0.0", server_port=7860, debug=True, share=True)
```
After (added and updated lines):

```diff
@@ -10,232 +10,395 @@ import os
 import sys
 import json
 import torch
+from fetch_market_data import fetch_market_data, ASSETS, FRED_IDS
+from llm_analysis_rag import analyze_agent_decision, analyze_historical_segment
 from stable_baselines3 import SAC
+from environment import PortfolioEnv
+from evaluate_baselines import buy_and_hold, equally_weighted_rebalanced
 
 # --- Configuration ---
+MODEL_PATH = os.path.join("checkpoints", "sac_portfolio_model.zip")
 WINDOW_SIZE = 30
 MACRO_COLS = list(FRED_IDS.values())
+DASHBOARD_DATA_PATH = os.path.join("data", "historical_dashboard_data.csv")
+
+
 TRAIN_START_DATE = "2015-01-01"
+TRAIN_END_DATE = "2020-12-31"
+
+# Global variable for dashboard data needed for Tabs 3 & 4
 DASHBOARD_DATA_DF = None
 
+# Define Time Period mappings for the dropdown
 TIME_PERIODS = {
+    "6 Months": 180,
+    "1 Year": 365,
+    "2 Years": 730,
+    "5 Years": 1825,
+    "Max Available": 9999  # Sentinel value for max
 }
 
+# =========================================
+# Initialization Functions
+# =========================================
+
```
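A quick aside on the `TIME_PERIODS` table above: the names map to day counts that `run_historical_analysis` (further down in the diff) turns into a date cutoff via `TIME_PERIODS.get(period_name, 365)`. A minimal sketch of that lookup; `cutoff_for` is an illustrative name, not a function in the app:

```python
from datetime import datetime, timedelta

TIME_PERIODS = {"6 Months": 180, "1 Year": 365, "2 Years": 730,
                "5 Years": 1825, "Max Available": 9999}

def cutoff_for(period_name: str) -> datetime:
    # Unknown names fall back to one year, matching the app's .get(period_name, 365)
    days = TIME_PERIODS.get(period_name, 365)
    return datetime.now() - timedelta(days=days)

# The 9999-day sentinel reaches back ~27 years, far past the ~6 years of data
# the app fetches, so "Max Available" effectively selects the whole frame.
print(cutoff_for("6 Months").date(), cutoff_for("Max Available").date())
```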
```diff
 def initialize_dashboard_data():
+    """Fetches and loads historical data at startup for Tabs 3 & 4."""
     global DASHBOARD_DATA_DF
+    print("--- Initializing Historical Data for Analyst/Simulation Tabs ---")
+
+    # Fetching last 6 years to support longer analysis periods and simulation
     end_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
     start_date = (datetime.now() - timedelta(days=365*6)).strftime('%Y-%m-%d')
+
+    print(f"Fetching historical data from {start_date} to {end_date}...")
+    # This might take a minute on first run
     fetch_market_data(start_date, end_date, DASHBOARD_DATA_PATH)
 
     if os.path.exists(DASHBOARD_DATA_PATH):
         DASHBOARD_DATA_DF = pd.read_csv(DASHBOARD_DATA_PATH, index_col=0, parse_dates=True)
+        # Basic cleaning
+        DASHBOARD_DATA_DF.dropna(how='all', inplace=True)
+        # Calculate equal weight return for dashboard metrics
         asset_cols = [c for c in ASSETS if c in DASHBOARD_DATA_DF.columns]
         if asset_cols:
             DASHBOARD_DATA_DF['Daily_Ret_Eq'] = DASHBOARD_DATA_DF[asset_cols].pct_change().mean(axis=1)
+        print(f"Data loaded successfully. Shape: {DASHBOARD_DATA_DF.shape}")
+        print(f"Data range: {DASHBOARD_DATA_DF.index.min().date()} to {DASHBOARD_DATA_DF.index.max().date()}")
     else:
+        print("❌ Failed to initialize historical data.")
 
+# Initialize data at startup
 try:
     initialize_dashboard_data()
 except Exception as e:
+    print(f"Warning: Data initialization failed. Error: {e}")
+
+
+# =========================================
+# Professional Metrics & Evaluation Functions
+# =========================================
 
 def evaluate_agent_pro(env, model):
+    """
+    Runs the trained agent on the environment and returns portfolio values.
+    """
     obs, info = env.reset()
     terminated, truncated = False, False
+    portfolio_values = [env.initial_amount]
+
     while not (terminated or truncated):
         action, _states = model.predict(obs, deterministic=True)
         obs, reward, terminated, truncated, info = env.step(action)
         portfolio_values.append(info['portfolio_value'])
+
+    # Align index with the actual steps taken
     valid_dates = env.df.index[env.window_size-1:]
     return pd.Series(portfolio_values, index=valid_dates[:len(portfolio_values)])
 
 def calculate_metrics_pro(portfolio_values, freq=252, rf=0.0):
+    """
+    Calculates key professional performance metrics from a series of portfolio values.
+    """
+    if len(portfolio_values) < 2:
+        return {k: "N/A" for k in ["Total Return", "CAGR", "Sharpe Ratio", "Sortino Ratio", "Volatility", "Max Drawdown", "Calmar Ratio"]}
+
     returns = portfolio_values.pct_change().dropna()
+    if returns.empty:
+        return {k: "0.00%" if "%" in k else "0.00" for k in ["Total Return", "CAGR", "Sharpe Ratio", "Sortino Ratio", "Volatility", "Max Drawdown", "Calmar Ratio"]}
 
     total_return = (portfolio_values.iloc[-1] / portfolio_values.iloc[0]) - 1
     num_years = (len(portfolio_values) - 1) / freq
     cagr = (portfolio_values.iloc[-1] / portfolio_values.iloc[0]) ** (1/num_years) - 1 if num_years > 0 else 0.0
+
+    sharpe_ratio = np.sqrt(freq) * (returns.mean() - rf) / returns.std() if returns.std() > 0 else np.nan
+
     downside_returns = returns[returns < 0]
+    downside_std = downside_returns.std()
+    sortino_ratio = np.sqrt(freq) * (returns.mean() - rf) / downside_std if downside_std > 0 else np.nan
+
     volatility = returns.std() * np.sqrt(freq)
+
+    rolling_max = portfolio_values.cummax()
+    drawdown = portfolio_values / rolling_max - 1.0
+    max_drawdown = drawdown.min()
+
+    calmar_ratio = cagr / abs(max_drawdown) if max_drawdown != 0 and cagr != 0 else np.nan
 
     return {
+        "Total Return": total_return,
+        "CAGR": cagr,
+        "Sharpe Ratio": sharpe_ratio,
+        "Sortino Ratio": sortino_ratio,
+        "Volatility": volatility,
+        "Max Drawdown": max_drawdown,
         "Calmar Ratio": calmar_ratio
     }
```
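To make the annualization conventions in `calculate_metrics_pro` concrete, here is a self-contained sketch on a synthetic one-year daily series. It uses the same conventions as the function above (rf = 0, freq = 252, drawdown measured from the running peak); the numbers are illustrative only:

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
daily_returns = rng.normal(0.0005, 0.01, 252)             # ~1 trading year
values = pd.Series(10_000 * np.cumprod(1 + daily_returns))

returns = values.pct_change().dropna()
freq = 252

sharpe = np.sqrt(freq) * returns.mean() / returns.std()   # rf = 0, as above
downside_std = returns[returns < 0].std()
sortino = np.sqrt(freq) * returns.mean() / downside_std   # penalizes only losing days
drawdown = values / values.cummax() - 1.0                 # distance below the running peak
print(f"Sharpe {sharpe:.2f}, Sortino {sortino:.2f}, MaxDD {drawdown.min():.2%}")
```

Sortino uses the same numerator as Sharpe but divides by downside deviation only, which is why a strategy with small, frequent losses and occasional large gains scores better on Sortino than on Sharpe.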
```diff
 
+# =========================================
+# XAI: Feature Importance Function
+# =========================================
 def calculate_feature_importance(model, obs):
+    """
+    Calculates feature importance using Integrated Gradients on the RL agent's policy network.
+    """
+    # Convert observation to torch tensor and enable gradient tracking
     obs_tensor = torch.as_tensor(obs, dtype=torch.float32, device=model.device)
     obs_tensor.requires_grad_()
+
+    # Get the policy network (actor)
     actor = model.policy.actor
+
+    # Define a baseline (e.g., a zero observation)
     baseline = torch.zeros_like(obs_tensor)
+
+    # Number of steps for integral approximation
     steps = 50
+
+    # Generate scaled inputs along the path from baseline to input
     scaled_inputs = [baseline + (float(i) / steps) * (obs_tensor - baseline) for i in range(steps + 1)]
+
     grads = []
     for scaled_input in scaled_inputs:
+        # Forward pass to get action distribution parameters (mean)
         action_mean = actor(scaled_input)
+
+        # We need a scalar output to calculate gradients against.
+        # Here we sum, representing overall sensitivity of the action vector.
         target_output = action_mean.sum()
+
+        # Calculate gradients of the target output with respect to the input features
         grad = torch.autograd.grad(outputs=target_output, inputs=scaled_input)[0]
         grads.append(grad)
 
+    # Average the gradients using the trapezoidal rule approximation.
+    # (Stack into one tensor first: slicing the Python list and adding would
+    # concatenate the two lists rather than average the gradients.)
+    stacked_grads = torch.stack(grads)
+    avg_grads = ((stacked_grads[:-1] + stacked_grads[1:]) / 2.0).mean(dim=0)
 
+    # Calculate Integrated Gradients: (input - baseline) * average_gradients
     integrated_grads = (obs_tensor - baseline) * avg_grads
+
+    # Detach, move to cpu, and convert to numpy array
     importance_scores = integrated_grads.detach().cpu().numpy().flatten()
+
+    # Feature Names mapping
+    num_assets = len(ASSETS)
+    num_macro = len(MACRO_COLS)
+
+    # Create feature names based on the observation structure
     feature_names = []
     for i in range(WINDOW_SIZE):
+        for asset in ASSETS:
+            feature_names.append(f"{asset}_t-{WINDOW_SIZE-1-i}")
     for i in range(WINDOW_SIZE):
+        for macro in MACRO_COLS:
+            feature_names.append(f"{macro}_t-{WINDOW_SIZE-1-i}")
 
+    # Combine into a dictionary and sort by absolute importance
     feature_importance_dict = dict(zip(feature_names, importance_scores))
+
+    # Aggregate importance by feature type (sum of absolute values across time steps)
     aggregated_importance = {}
     for base_feature in ASSETS + MACRO_COLS:
         total_imp = sum(abs(val) for key, val in feature_importance_dict.items() if key.startswith(base_feature))
         aggregated_importance[base_feature] = total_imp
 
+    # Sort and take top N for display
     top_features = dict(sorted(aggregated_importance.items(), key=lambda item: item[1], reverse=True)[:8])
 
+    # Create a Plotly bar chart
+    fig = px.bar(
+        x=list(top_features.values()),
+        y=list(top_features.keys()),
+        orientation='h',
+        title="Top Influential Features (XAI)",
+        labels={'x': 'Relative Importance Score', 'y': 'Feature'},
+        color=list(top_features.values()),
+        color_continuous_scale=px.colors.sequential.Viridis
+    )
+    fig.update_layout(
+        template="plotly_dark",
+        paper_bgcolor='rgba(0,0,0,0)',
+        plot_bgcolor='rgba(0,0,0,0)',
+        yaxis={'categoryorder': 'total ascending'},
+        coloraxis_showscale=False,
+        margin=dict(l=10, r=10, t=40, b=10),
+        height=300  # Keep it compact
+    )
+
     return fig
```
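A useful sanity check for an Integrated Gradients implementation like the one above is the completeness axiom: the attributions should sum (approximately) to F(x) − F(baseline). A minimal sketch of the same loop against a stand-in network; the two-layer `nn.Sequential` here is an assumption standing in for `model.policy.actor`, which the sketch does not load:

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
actor = nn.Sequential(nn.Linear(8, 16), nn.Tanh(), nn.Linear(16, 3))  # stand-in for model.policy.actor
x = torch.randn(1, 8)
baseline = torch.zeros_like(x)
steps = 50

grads = []
for i in range(steps + 1):
    xi = (baseline + (i / steps) * (x - baseline)).requires_grad_(True)
    target = actor(xi).sum()                         # scalar target, as in the app
    grads.append(torch.autograd.grad(target, xi)[0])

stacked = torch.stack(grads)                          # (steps+1, 1, 8)
avg = ((stacked[:-1] + stacked[1:]) / 2.0).mean(dim=0)  # trapezoidal average
ig = (x - baseline) * avg

# Completeness: sum of attributions ≈ F(x) - F(baseline)
print(ig.sum().item(), (actor(x).sum() - actor(baseline).sum()).item())
```

The two printed numbers should agree to a few decimal places; increasing `steps` tightens the match.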
```diff
 
+# =========================================
+# Tab 4 Logic: Historical Simulation
+# =========================================
+
 def run_historical_simulation(start_date_str, end_date_str):
+    """
+    Runs the RL agent on historical data and compares to baselines using professional metrics.
+    """
+    if DASHBOARD_DATA_DF is None:
+        # This function is a generator (it yields below); an early plain return would emit nothing to the UI
+        yield go.Figure(), "Data not initialized. Please restart app.", gr.update(visible=False)
+        return
+
+    status_msg = "Preparing simulation..."
+    yield go.Figure(), status_msg, gr.update(visible=False)
+
     try:
+        # 1. Validate and Slice Data
+        try:
+            start_date = pd.to_datetime(start_date_str)
+            end_date = pd.to_datetime(end_date_str)
+        except ValueError:
+            yield go.Figure(), "Error: Invalid date format. Use YYYY-MM-DD.", gr.update(visible=False)
+            return
+
         if start_date < DASHBOARD_DATA_DF.index.min() or end_date > DASHBOARD_DATA_DF.index.max():
+            avail_start = DASHBOARD_DATA_DF.index.min().date()
+            avail_end = DASHBOARD_DATA_DF.index.max().date()
+            yield go.Figure(), f"Error: Selected dates outside available range ({avail_start} to {avail_end}).", gr.update(visible=False)
+            return
+
         df_slice = DASHBOARD_DATA_DF.loc[start_date:end_date].copy()
         asset_cols_only = [c for c in ASSETS if c in df_slice.columns]
+
+        if len(df_slice) < WINDOW_SIZE + 10:
+            yield go.Figure(), "Error: Time period too short for simulation.", gr.update(visible=False)
+            return
+
+        # 2. Setup Environment and Agent
+        status_msg = "Running RL Agent simulation..."
+        yield go.Figure(), status_msg, gr.update(visible=False)
+
+        env = PortfolioEnv(df_slice, WINDOW_SIZE, initial_amount=10000)
+
+        if not os.path.exists(MODEL_PATH):
+            raise FileNotFoundError(f"Model not found: {MODEL_PATH}")
         model = SAC.load(MODEL_PATH)
 
+        # 3. Run Simulation Loop & Get Values using Pro Function
+        rl_portfolio_series = evaluate_agent_pro(env, model)
+
+        # 4. Calculate Baselines using Pro Functions
+        status_msg = "Calculating baselines and metrics..."
+        yield go.Figure(), status_msg, gr.update(visible=False)
 
+        # Pass only asset columns to baseline functions
+        bnh_portfolio_series = buy_and_hold(df_slice[asset_cols_only], initial_amount=10000)
+        # Realign B&H index to match RL agent's start date
+        bnh_portfolio_series = bnh_portfolio_series.loc[rl_portfolio_series.index[0]:]
+        # Normalize B&H starting value to match RL agent's start
+        bnh_portfolio_series = bnh_portfolio_series / bnh_portfolio_series.iloc[0] * 10000
+
+        eq_portfolio_series = equally_weighted_rebalanced(df_slice[asset_cols_only], initial_amount=10000)
+        eq_portfolio_series = eq_portfolio_series.loc[rl_portfolio_series.index[0]:]
+        eq_portfolio_series = eq_portfolio_series / eq_portfolio_series.iloc[0] * 10000
+
+        # 5. Generate Plot
         fig = go.Figure()
+        fig.add_trace(go.Scatter(x=rl_portfolio_series.index, y=rl_portfolio_series, mode='lines', name='RL Agent (SAC)', line=dict(color='#10b981', width=3)))
+        fig.add_trace(go.Scatter(x=bnh_portfolio_series.index, y=bnh_portfolio_series, mode='lines', name='Buy & Hold (SPY)', line=dict(color='#6b7280', dash='dash')))
+        fig.add_trace(go.Scatter(x=eq_portfolio_series.index, y=eq_portfolio_series, mode='lines', name='Equal Weighted', line=dict(color='#a855f7', dash='dot')))
+
+        fig.update_layout(
+            title="Simulation: Strategy Performance Comparison ($10k Start)",
+            xaxis_title="Date",
+            yaxis_title="Portfolio Value ($)",
+            template="plotly_dark",
+            paper_bgcolor='rgba(0,0,0,0)',
+            plot_bgcolor='rgba(0,0,0,0)',
+            hovermode="x unified",
+            legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1)
+        )
+
+        # 6. Calculate Professional Metrics Table
+        rl_m = calculate_metrics_pro(rl_portfolio_series)
+        bnh_m = calculate_metrics_pro(bnh_portfolio_series)
+        eq_m = calculate_metrics_pro(eq_portfolio_series)
+
+        # Helper to format based on metric type
+        def fmt(val, is_pct=True):
+            if pd.isna(val): return "N/A"
+            return f"{val:.2%}" if is_pct else f"{val:.2f}"
+
+        metrics_data = {
+            "Metric": ["Total Return", "CAGR", "Sharpe Ratio", "Sortino Ratio", "Volatility (Ann.)", "Max Drawdown", "Calmar Ratio"],
+            "RL Agent (SAC)": [fmt(rl_m["Total Return"]), fmt(rl_m["CAGR"]), fmt(rl_m["Sharpe Ratio"], False), fmt(rl_m["Sortino Ratio"], False), fmt(rl_m["Volatility"]), fmt(rl_m["Max Drawdown"]), fmt(rl_m["Calmar Ratio"], False)],
+            "Buy & Hold (SPY)": [fmt(bnh_m["Total Return"]), fmt(bnh_m["CAGR"]), fmt(bnh_m["Sharpe Ratio"], False), fmt(bnh_m["Sortino Ratio"], False), fmt(bnh_m["Volatility"]), fmt(bnh_m["Max Drawdown"]), fmt(bnh_m["Calmar Ratio"], False)],
+            "Equal Weighted": [fmt(eq_m["Total Return"]), fmt(eq_m["CAGR"]), fmt(eq_m["Sharpe Ratio"], False), fmt(eq_m["Sortino Ratio"], False), fmt(eq_m["Volatility"]), fmt(eq_m["Max Drawdown"]), fmt(eq_m["Calmar Ratio"], False)],
         }
+        metrics_df = pd.DataFrame(metrics_data)
+
+        # Format the dataframe as a markdown table for cleaner display
+        metrics_md = metrics_df.to_markdown(index=False)
+        final_metrics_display = f"### 📊 Professional Performance Metrics\n\n{metrics_md}"
+
+        yield fig, "Simulation Complete.", final_metrics_display
+
     except Exception as e:
+        import traceback
+        traceback.print_exc()
+        yield go.Figure(), f"Error during simulation: {str(e)}", gr.update(visible=False)
```
```diff
+
+
+# =========================================
+# Tab 3 Logic: Historical Data Analyst
+# =========================================
+
+def run_historical_analysis(selected_assets, period_name):
+    """Backend for Tab 3."""
+    if DASHBOARD_DATA_DF is None or not selected_assets:
+        # Generator: early exits must yield a value, then return
+        yield go.Figure(), "Please wait for data initialization or select assets."
+        return
+
+    status_html = """<div style="color: #9ca3af;">🔄 Processing data and running AI analysis...</div>"""
+    yield go.Figure(), status_html
 
     try:
+        # 1. Filter Data by Time Period
+        days = TIME_PERIODS.get(period_name, 365)
+        cutoff_date = datetime.now() - timedelta(days=days)
+        valid_assets = [a for a in selected_assets if a in DASHBOARD_DATA_DF.columns]
+        if not valid_assets:
+            yield go.Figure(), "Error: Selected assets not found in available data."
+            return
+        df_filtered = DASHBOARD_DATA_DF.loc[cutoff_date:, valid_assets].copy()
+        if df_filtered.empty:
+            yield go.Figure(), f"No data found for the selected period: {period_name}"
+            return
+
+        # 2. Generate Normalized Price Plot
+        df_normalized = df_filtered / df_filtered.iloc[0] * 100
+        fig = px.line(df_normalized, x=df_normalized.index, y=df_normalized.columns,
+                      title=f"Performance Comparison: {period_name} (Base=100)",
+                      color_discrete_sequence=px.colors.qualitative.Bold)
+        fig.update_layout(template="plotly_dark", paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)',
+                          yaxis_title="Normalized Price", xaxis_title="Date", legend_title_text="", hovermode="x unified")
+
+        # 3. Run AI Analysis
+        analysis_text = analyze_historical_segment(df_filtered, valid_assets, period_name)
+        formatted_analysis = f"### 🤖 AI Analyst Report: {period_name}\n\n{analysis_text}"
+        yield fig, formatted_analysis
+
     except Exception as e:
+        import traceback
+        traceback.print_exc()
+        yield go.Figure(), f"### Error during analysis\n\n{str(e)}"
+
+
+# =========================================
+# Tab 2 Logic: Forecast & Analysis (XAI)
+# =========================================
 
+def get_latest_data_window(window_size=30):
+    """Fetches latest data needed for prediction."""
     print("Fetching prediction data...")
+    lookback_days = window_size + 150
+    end_date = datetime.now().strftime('%Y-%m-%d')
+    start_date = (datetime.now() - timedelta(days=lookback_days)).strftime('%Y-%m-%d')
+    temp_filename = os.path.join(project_root, "data", "temp_gradio_prediction_data.csv")
+    fetch_market_data(start_date, end_date, temp_filename)
+    if not os.path.exists(temp_filename): raise Exception("Failed to fetch market data file.")
+    df = pd.read_csv(temp_filename, index_col=0, parse_dates=True)
     df.dropna(inplace=True)
+    if len(df) < window_size: raise Exception("Not enough clean data fetched for prediction.")
     return df.iloc[-window_size:].copy()
 
 def prepare_observation(data_window):
@@ -244,141 +407,257 @@ def prepare_observation(data_window):
     norm_prices = price_data / (price_data[0] + 1e-8)
     norm_macro = macro_data / (macro_data[0] + 1e-8)
     obs = np.concatenate([norm_prices, norm_macro], axis=1)
+    # Return both flattened obs for model and raw obs for XAI
     return obs.flatten().astype(np.float32), obs.astype(np.float32), data_window
```
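`prepare_observation` anchors every column to its first row, so each feature enters the network as a ratio starting at 1.0 rather than a raw price level. A small sketch with made-up shapes (two assets, one macro series, a window of three days; the real app uses WINDOW_SIZE=30 and the ASSETS/MACRO_COLS columns):

```python
import numpy as np

prices = np.array([[100.0, 50.0],
                   [102.0, 49.0],
                   [105.0, 51.0]])           # (window, n_assets)
macro = np.array([[4.00], [4.10], [4.05]])   # (window, n_macro)

norm_prices = prices / (prices[0] + 1e-8)    # first row becomes all ones
norm_macro = macro / (macro[0] + 1e-8)
obs = np.concatenate([norm_prices, norm_macro], axis=1)

flat = obs.flatten().astype(np.float32)      # what SAC.predict consumes
print(obs.shape, flat.shape)                 # (3, 3) (9,)
```

The 1e-8 term only guards against a zero first value; it has no visible effect on ordinary prices.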
```diff
 
 def predict_and_analyze():
+    """Main function for Forecast Tab."""
+    status_msg = "Starting process..."
+    loading_html = """<div style="color: #9ca3af;">🔄 Fetching data & running prediction...</div>"""
+    # Update to yield an empty plot for the XAI chart initially
+    yield status_msg, None, go.Figure(), loading_html
+
     try:
+        data_window = get_latest_data_window(WINDOW_SIZE)
+        # Get flattened obs for prediction and raw obs for XAI
         flat_obs, raw_obs, df_window_for_analyst = prepare_observation(data_window)
+
+        if not os.path.exists(MODEL_PATH): raise FileNotFoundError(f"Model not found: {MODEL_PATH}")
         model = SAC.load(MODEL_PATH)
 
+        # --- XAI: Calculate Feature Importance ---
+        status_msg = "Calculating feature importance..."
+        yield status_msg, None, go.Figure(), loading_html
+        xai_plot = calculate_feature_importance(model, raw_obs)
+
+        # --- Prediction ---
         action, _ = model.predict(flat_obs, deterministic=True)
+        exp_action = np.exp(np.asarray(action).flatten())
+        weights = exp_action / np.sum(exp_action)
+        allocations_dict = {asset: weights[i] for i, asset in enumerate(ASSETS)}
+        allocations_dict['Cash'] = weights[-1]
+        alloc_df = pd.DataFrame(list(allocations_dict.items()), columns=['Asset', 'Proposed Allocation'])
+        alloc_df['Proposed Allocation'] = alloc_df['Proposed Allocation'].apply(lambda x: f"{x:.2%}")
+
+        status_msg = "Prediction done. Running AI Risk Analysis..."
+        analysing_html = """<div style="color: #9ca3af;">🤖 Running Qwen-2.5-3B Risk Analysis...</div>"""
+        # Yield XAI plot along with other outputs
+        yield status_msg, alloc_df, xai_plot, analysing_html
+
+        allocations_for_llm = {k: float(v) for k, v in allocations_dict.items()}
+        analysis_result = analyze_agent_decision(df_window_for_analyst, allocations_for_llm)
+        status_msg = "Analysis complete!"
+
+        if isinstance(analysis_result, dict):
+            strat = analysis_result.get('strategy_summary', 'N/A')
+            risk = analysis_result.get('risk_level', 'N/A').upper()
+            just = analysis_result.get('justification', 'N/A')
+            conf = analysis_result.get('confidence_score', 'N/A')
+            if 'HIGH' in risk:
+                risk_css = "color: #ef4444; font-weight: bold;"
+                status_bg = "#7f1d1d"
+                status_border = "#ef4444"
+                status_icon = "❌"
+                status_text = "TRADE BLOCKED: High Risk Detected"
+            else:
+                risk_css = "color: #10b981; font-weight: bold;"
+                status_bg = "#064e3b"
+                status_border = "#10b981"
+                status_icon = "👍"
+                status_text = "TRADE APPROVED"
+
+            report_html = f"""
+            <div style="background-color: #1f2937; padding: 20px; border-radius: 12px 12px 0 0; border: 1px solid #374151; border-bottom: none;">
+                <h3 style="margin-top: 0; color: #e5e7eb;">🤖 AI Risk Analyst Report</h3>
+                <div style="margin-bottom: 15px;"><strong style="color: #9ca3af;">Strategy:</strong><br><span style="color: #d1d5db;">{strat}</span></div>
+                <div style="margin-bottom: 15px;"><strong style="color: #9ca3af;">Risk Level:</strong><span style="margin-left: 8px; {risk_css}">{risk}</span></div>
+                <div style="margin-bottom: 15px;"><strong style="color: #9ca3af;">Justification:</strong><br><span style="color: #d1d5db;">{just}</span></div>
+                <div><strong style="color: #9ca3af;">Confidence:</strong> <span style="color: #d1d5db;">{conf}/10</span></div>
+            </div>
+            <div style="background-color: {status_bg}; color: white; padding: 15px; border-radius: 0 0 12px 12px; border: 2px solid {status_border}; text-align: center; font-size: 1.2em; font-weight: bold; display: flex; align-items: center; justify-content: center;">
+                <span style="margin-right: 10px; font-size: 1.4em;">{status_icon}</span>{status_text}
+            </div>"""
         else:
+            report_html = f"""<div style="padding: 20px; background-color: #7f1d1d; color: #fca5a5; border-radius: 12px;"><h3>❌ Analysis Failed to Parse</h3><p>{str(analysis_result)}</p></div>"""
+        # Final yield with all outputs including XAI plot
+        yield status_msg, alloc_df, xai_plot, report_html
     except Exception as e:
         import traceback
         traceback.print_exc()
+        status_msg = f"Error: {str(e)}"
+        error_html = f"""<div style="padding: 20px; background-color: #7f1d1d; color: #fca5a5; border-radius: 12px;"><h3>❌ Process Error</h3><p>{str(e)}</p></div>"""
+        # Final yield in case of error
+        yield status_msg, None, go.Figure(), error_html
```
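The exp-then-normalize step in `predict_and_analyze` is a softmax over the raw SAC action: it maps an unconstrained vector to non-negative weights that sum to one, with the last slot read as cash. In isolation:

```python
import numpy as np

action = np.array([0.8, -1.2, 0.1, 0.5, -0.3, 0.0])  # raw actor output: assets + cash slot
exp_action = np.exp(action)
weights = exp_action / np.sum(exp_action)             # softmax -> valid allocation

print(weights.round(3), weights.sum())                # weights sum to 1.0
```

A consequence of this choice is that no weight can be negative or exactly zero: the agent can tilt away from an asset but never short it or fully exit it.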
```diff
+
+
+# =========================================
+# Tab 1 Logic: Live Dashboard (DUMMY DATA)
+# =========================================
+def get_dashboard_metrics():
+    return "$135,400", "+3.07%"
+
+def get_portfolio_history_plot():
+    dates = pd.date_range(start="2023-01-01", periods=100)
+    np.random.seed(42)
+    rl_returns = np.random.normal(0.001, 0.01, 100)
+    bnh_returns = np.random.normal(0.0005, 0.012, 100)
+    rl_value = 10000 * np.cumprod(1 + rl_returns)
+    bnh_value = 10000 * np.cumprod(1 + bnh_returns)
+    fig = go.Figure()
+    fig.add_trace(go.Scatter(x=dates, y=rl_value, mode='lines', name='RL Agent (Live)', line=dict(color='#10b981', width=3)))
+    fig.add_trace(go.Scatter(x=dates, y=bnh_value, mode='lines', name='Benchmark', line=dict(color='#6b7280', dash='dash')))
+    fig.update_layout(title="Portfolio Net Worth (Live Tracking)", xaxis_title="Date", yaxis_title="Net Worth ($)", template="plotly_dark", paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)', legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1))
     return fig
 
+def get_current_allocation_plot():
+    labels = ASSETS + ['Cash']
+    values = [0.25, 0.10, 0.30, 0.15, 0.05, 0.15]
+    fig = px.pie(values=values, names=labels, title="Current Holdings Breakdown", color_discrete_sequence=px.colors.qualitative.Bold)
+    fig.update_traces(textposition='inside', textinfo='percent+label', hole=.4)
+    fig.update_layout(template="plotly_dark", paper_bgcolor='rgba(0,0,0,0)', legend=dict(orientation="h", yanchor="bottom", y=-0.1))
     return fig
 
+def get_recent_transactions():
+    data = [["2025-11-24", "Rebalance", "MULTIPLE", "N/A"], ["2025-11-24", "SELL", "SPY", "$4,500"], ["2025-11-24", "BUY", "TLT", "$4,200"], ["2025-11-21", "BUY", "BTC-USD", "$1,000"]]
+    return pd.DataFrame(data, columns=["Date", "Type", "Asset", "Approx. Value"])
```
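The analysis callbacks above (`predict_and_analyze`, `run_historical_analysis`, `run_historical_simulation`) are Python generators, and the Gradio UI below binds them directly to buttons: each `yield` pushes an intermediate update to every bound output, which is how the status boxes and plots refresh mid-run. A minimal, hypothetical sketch of that pattern (not the app's UI; `queue()` is what enables streaming of generator yields):

```python
import time
import gradio as gr

def slow_task():
    # Each yield updates both outputs in place; the final yield is the final state.
    yield "Working...", None
    time.sleep(1)
    yield "Done.", "42"

with gr.Blocks() as streaming_demo:
    status = gr.Textbox(label="Status")
    result = gr.Textbox(label="Result")
    gr.Button("Run").click(fn=slow_task, inputs=None, outputs=[status, result])

# streaming_demo.queue().launch()
```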
```diff
+
+
+# =========================================
+# Gradio Interface
+# =========================================
 
 custom_css = """
 .metric-box { background-color: #1f2937; padding: 20px; border-radius: 12px; border: 1px solid #374151; text-align: center; }
+.metric-label { font-size: 1.1em; color: #9ca3af; margin-bottom: 5px; }
 .metric-value { font-size: 2.2em; font-weight: 700; color: #e5e7eb; }
+.disclaimer-box { background-color: #374151; padding: 15px; border-radius: 8px; border-left: 4px solid #f59e0b; color: #d1d5db; font-size: 0.9em; margin-bottom: 20px; }
 """
 
+# theme = gr.themes.Soft(primary_hue="emerald", secondary_hue="slate", neutral_hue="zinc").set(
+#     body_background_fill="#111827", block_background_fill="#1f2937", block_border_width="1px", block_border_color="#374151"
+# )
+
+with gr.Blocks(
+    # theme=theme, css=custom_css,
+    title="Deep RL Portfolio Manager") as demo:
+    gr.HTML("""<script>function forceDark(){document.body.classList.add('dark');} forceDark(); setTimeout(forceDark, 500);</script>""")
+
     gr.Markdown("# 🧠 Deep RL & LLM Portfolio Manager")
+
     with gr.Tabs():
+        # ================= TAB 1: DASHBOARD (RESTORED) =================
+        with gr.TabItem("📊 Live Dashboard"):
+            # Metrics Row
             with gr.Row():
+                nw_val, dc_val = get_dashboard_metrics()
+                with gr.Column(elem_classes=["metric-box"]):
+                    gr.HTML(f"<div class='metric-label'>Current Net Worth</div><div class='metric-value'>{nw_val}</div>")
+                with gr.Column(elem_classes=["metric-box"]):
+                    gr.HTML(f"<div class='metric-label'>24h Change</div><div class='metric-value' style='color: #10b981;'>{dc_val}</div>")
+
+            # Main Chart row
             with gr.Row():
+                with gr.Column(scale=3):
+                    history_chart = gr.Plot(value=get_portfolio_history_plot(), label="Net Worth History")
+
+            # Bottom Row: Allocations and Transactions
             with gr.Row():
+                with gr.Column(scale=1):
+                    allocation_chart = gr.Plot(value=get_current_allocation_plot(), label="Current Allocation")
+                with gr.Column(scale=2):
+                    gr.Markdown("### Recent Transactions")
+                    transactions_table = gr.Dataframe(value=get_recent_transactions(), interactive=False, wrap=True)
+
+        # ================= TAB 2: FORECAST (UPDATED with XAI) =================
+        with gr.TabItem("🔮 Forecast & AI Analysis"):
+            gr.Markdown("### Generate Tomorrow's Portfolio Strategy")
+            run_btn = gr.Button("🚀 Run Overnight Analysis", variant="primary", size="lg")
+            status_output = gr.Textbox(label="System Status", placeholder="Ready...", interactive=False, lines=1)
+            gr.Markdown("---")
 
             with gr.Row():
+                # Left Column: Allocations & XAI Plot
                 with gr.Column(scale=2):
+                    gr.Markdown("### 📊 Suggested Position")
+                    allocation_output = gr.Dataframe(headers=["Asset", "Allocation"], datatype=["str", "str"], interactive=False)
+
+                    # NEW: XAI Feature Importance Plot
+                    gr.Markdown("### 🧠 Why did the agent choose this?")
+                    xai_output_plot = gr.Plot(label="Top Influential Factors (XAI)", show_label=False)
+
+                # Right Column: AI Analysis Report
                 with gr.Column(scale=3):
+                    analysis_report_html = gr.HTML(label="AI Risk Analysis Report")
+
+            # Updated click event with new XAI output
+            run_btn.click(
+                fn=predict_and_analyze,
+                inputs=None,
+                outputs=[status_output, allocation_output, xai_output_plot, analysis_report_html]
+            )
+
+        # ================= TAB 3: HISTORICAL DATA ANALYST =================
+        with gr.TabItem("📅 Historical Data Analyst"):
+            gr.Markdown("### Analyze Past Market Performance with AI")
 
             with gr.Row():
                 with gr.Column(scale=1):
+                    all_tickers_hist = ASSETS + list(FRED_IDS.values())
+                    if DASHBOARD_DATA_DF is not None:
+                        available_tickers_hist = [t for t in all_tickers_hist if t in DASHBOARD_DATA_DF.columns]
+                    else:
+                        available_tickers_hist = []
+                    default_tickers_hist = available_tickers_hist[:3] if available_tickers_hist else []
+
+                    asset_selector = gr.Dropdown(choices=available_tickers_hist, value=default_tickers_hist, multiselect=True, label="1. Select Assets")
+                    period_selector = gr.Dropdown(choices=list(TIME_PERIODS.keys()), value="1 Year", label="2. Select Period")
+                    analyze_btn = gr.Button("🔍 Run Analysis", variant="primary")
+
                 with gr.Column(scale=3):
+                    historical_plot = gr.Plot(label="Performance Plot")
+
+            gr.Markdown("---")
+            historical_analysis_md = gr.Markdown("### 🤖 AI Analyst Report\n\n*Click 'Run Analysis' to generate.*")
+
+            analyze_btn.click(
+                fn=run_historical_analysis,
+                inputs=[asset_selector, period_selector],
+                outputs=[historical_plot, historical_analysis_md]
+            )
+
+        # ================= TAB 4: HISTORICAL SIMULATION (UPDATED with Pro Metrics) =================
+        with gr.TabItem("📈 Historical Simulation"):
+            gr.Markdown("### Backtest the RL Agent against Baselines")
+
+            # Disclaimer Box
+            gr.HTML(f"""
+            <div class='disclaimer-box'>
+                <strong>⚠️ IMPORTANT DISCLAIMER:</strong> The RL model was trained on data from approximately
+                <strong>{TRAIN_START_DATE} to {TRAIN_END_DATE}</strong>. Running simulations outside or overlapping significantly
+                with this period may not accurately reflect real-world performance (lookahead bias or out-of-distribution data).
+                Use for educational purposes only.
+            </div>
+            """)
 
             with gr.Row():
+                with gr.Column(scale=1):
+                    start_date_input = gr.Textbox(label="Start Date (YYYY-MM-DD)", value=(datetime.now() - timedelta(days=365)).strftime('%Y-%m-%d'))
+                    end_date_input = gr.Textbox(label="End Date (YYYY-MM-DD)", value=(datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d'))
+                    sim_btn = gr.Button("▶️ Run Simulation", variant="primary")
+                    sim_status = gr.Textbox(label="Status", interactive=False, lines=1)
+
+                with gr.Column(scale=3):
+                    sim_plot = gr.Plot(label="Simulation Performance")
+
+            gr.Markdown("---")
+            # Updated to Markdown component for better table formatting
+            sim_metrics_md = gr.Markdown("### 📊 Professional Performance Metrics\n\n*Run simulation to see metrics.*")
+
+            sim_btn.click(
+                fn=run_historical_simulation,
+                inputs=[start_date_input, end_date_input],
+                outputs=[sim_plot, sim_status, sim_metrics_md]
+            )
 
 if __name__ == "__main__":
     demo.queue().launch(server_name="0.0.0.0", server_port=7860, debug=True, share=True)
```