Tulitula committed on
Commit
5bbf055
·
verified ·
1 Parent(s): 42f56cc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +616 -424
app.py CHANGED
@@ -1,5 +1,4 @@
1
- 3# app.py
2
- import os, io, math, time, warnings
3
  warnings.filterwarnings("ignore")
4
 
5
  from typing import List, Tuple, Dict, Optional
@@ -8,38 +7,57 @@ import numpy as np
8
  import pandas as pd
9
  import matplotlib.pyplot as plt
10
  from PIL import Image
 
11
  import requests
12
  import yfinance as yf
13
- import gradio as gr
14
 
15
- # ---------------- config ----------------
 
 
 
 
 
 
16
  DATA_DIR = "data"
17
  os.makedirs(DATA_DIR, exist_ok=True)
18
 
 
19
  MAX_TICKERS = 30
20
- DEFAULT_LOOKBACK_YEARS = 10
21
- MARKET_TICKER = "VOO"
22
-
23
- SYNTH_ROWS = 1000 # size of generated dataset for suggestions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
- # Globals that update with horizon changes
26
- HORIZON_YEARS = 10
27
- RF_CODE = "DGS10"
28
- RF_ANN = 0.0375 # updated at launch
29
-
30
- # ---------------- helpers ----------------
31
def fred_series_for_horizon(years: float) -> str:
    """Map an investment horizon in years to the nearest FRED Treasury yield series code."""
    # Clamp to the supported 1-100 year range, then walk the tenor ladder upward.
    horizon = max(1.0, min(100.0, float(years)))
    tenor_ladder = (
        (2, "DGS2"),
        (3, "DGS3"),
        (5, "DGS5"),
        (7, "DGS7"),
        (10, "DGS10"),
        (20, "DGS20"),
    )
    for cutoff, series in tenor_ladder:
        if horizon <= cutoff:
            return series
    # Anything beyond 20 years maps to the 30-year series.
    return "DGS30"
41
 
42
  def fetch_fred_yield_annual(code: str) -> float:
 
43
  url = f"https://fred.stlouisfed.org/graph/fredgraph.csv?id={code}"
44
  try:
45
  r = requests.get(url, timeout=10)
@@ -50,103 +68,104 @@ def fetch_fred_yield_annual(code: str) -> float:
50
  except Exception:
51
  return 0.03
52
 
53
def fetch_prices_monthly(tickers: List[str], years: int) -> pd.DataFrame:
    """Download monthly auto-adjusted prices for `tickers` over `years` of history.

    Returns a wide DataFrame with one column per requested ticker. Tickers are
    deduplicated (order preserved) and uppercased. Columns yfinance did not
    return are silently dropped; gaps are forward-filled.
    """
    tickers = list(dict.fromkeys([t.upper().strip() for t in tickers]))
    # A few extra days of cushion so the first monthly bar is complete.
    start = (pd.Timestamp.today(tz="UTC") - pd.DateOffset(years=years, days=7)).date()
    end = pd.Timestamp.today(tz="UTC").date()

    df = yf.download(
        tickers,
        start=start,
        end=end,
        interval="1mo",
        auto_adjust=True,
        actions=False,
        progress=False,
        group_by="column",
        threads=False,
    )

    # Normalize to wide frame of prices (one column per ticker)
    if isinstance(df, pd.Series):
        df = df.to_frame()
    if isinstance(df.columns, pd.MultiIndex):
        # prefer Close; fall back to Adj Close if needed
        lvl0 = [str(x) for x in df.columns.get_level_values(0).unique()]
        if "Close" in lvl0:
            df = df["Close"]
        elif "Adj Close" in lvl0:
            df = df["Adj Close"]
        else:
            # take last level if unknown shape
            df = df.xs(df.columns.levels[0][-1], axis=1, level=0, drop_level=True)
    else:
        # some yfinance versions already return simple columns per ticker
        pass

    # keep only tickers we asked for, drop all-NaN rows, forward fill gaps.
    # Fix: fillna(method="ffill") is deprecated (removed in pandas >= 3.0);
    # use the dedicated .ffill() method instead.
    cols = [c for c in tickers if c in df.columns]
    out = df[cols].dropna(how="all").ffill()
    return out
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
def monthly_returns(prices: pd.DataFrame) -> pd.DataFrame:
    """Simple month-over-month returns; the initial all-NaN row is dropped."""
    rets = prices.pct_change()
    return rets.dropna()
94
 
95
def yahoo_search(query: str):
    """Best-effort symbol search via Yahoo Finance; never raises.

    Returns up to 10 "SYM | name | exchange" strings, or a single
    "typed symbol" placeholder when the search yields nothing or fails.
    """
    if not query or not str(query).strip():
        return []
    fallback = [f"{query.strip().upper()} | typed symbol | n/a"]
    url = "https://query1.finance.yahoo.com/v1/finance/search"
    params = {"q": query.strip(), "quotesCount": 10, "newsCount": 0}
    headers = {"User-Agent": "Mozilla/5.0"}
    try:
        resp = requests.get(url, params=params, headers=headers, timeout=10)
        resp.raise_for_status()
        payload = resp.json()
        results = []
        for quote in payload.get("quotes", []):
            sym = quote.get("symbol")
            name = quote.get("shortname") or quote.get("longname") or ""
            exch = quote.get("exchDisp") or ""
            # Skip symbols with non-ASCII characters (dropdown-safe).
            if sym and sym.isascii():
                results.append(f"{sym} | {name} | {exch}")
        return (results or fallback)[:10]
    except Exception:
        # Network/parse failure: let the user proceed with what they typed.
        return fallback
117
-
118
def validate_tickers(symbols: List[str], years: int) -> List[str]:
    """Dedupe/uppercase symbols and keep those with price data; [] if the market proxy is missing."""
    base = [s for s in dict.fromkeys([t.upper().strip() for t in symbols]) if s]
    px = fetch_prices_monthly(base + [MARKET_TICKER], years)
    # Without the market proxy we cannot compute CAPM moments at all.
    if MARKET_TICKER not in px.columns:
        return []
    return [s for s in base if s in px.columns]
126
 
127
- # -------------- aligned moments --------------
 
 
 
 
 
128
def get_aligned_monthly_returns(symbols: List[str], years: int) -> pd.DataFrame:
    """Monthly returns for `symbols` plus the market proxy, aligned on common dates."""
    assets = [s for s in dict.fromkeys(symbols) if s != MARKET_TICKER]
    px = fetch_prices_monthly(assets + [MARKET_TICKER], years)
    rets = monthly_returns(px)
    keep = [c for c in assets if c in rets.columns]
    if MARKET_TICKER in rets.columns:
        keep.append(MARKET_TICKER)
    aligned = rets[keep].dropna(how="any")
    # Guard against duplicated columns from overlapping ticker requests.
    return aligned.loc[:, ~aligned.columns.duplicated()]
136
 
137
  def estimate_all_moments_aligned(symbols: List[str], years: int, rf_ann: float):
138
- R = get_aligned_monthly_returns(symbols, years)
139
- if MARKET_TICKER not in R.columns or len(R) < 3:
140
- raise ValueError("Not enough aligned data with market proxy.")
141
  rf_m = rf_ann / 12.0
142
 
143
  m = R[MARKET_TICKER]
144
  if isinstance(m, pd.DataFrame):
145
  m = m.iloc[:, 0].squeeze()
146
 
147
- mu_m_ann = float(m.mean() * 12.0)
148
- sigma_m_ann = float(m.std(ddof=1) * math.sqrt(12.0))
149
- erp_ann = float(mu_m_ann - rf_ann)
150
 
151
  ex_m = m - rf_m
152
  var_m = float(np.var(ex_m.values, ddof=1))
@@ -158,7 +177,7 @@ def estimate_all_moments_aligned(symbols: List[str], years: int, rf_ann: float):
158
  cov_sm = float(np.cov(ex_s.values, ex_m.values, ddof=1)[0, 1])
159
  betas[s] = cov_sm / var_m
160
 
161
- betas[MARKET_TICKER] = 1.0
162
 
163
  asset_cols = [c for c in R.columns if c != MARKET_TICKER]
164
  cov_m = np.cov(R[asset_cols].values.T, ddof=1) if asset_cols else np.zeros((0, 0))
@@ -181,47 +200,77 @@ def portfolio_stats(weights: Dict[str, float],
181
  return 0.0, rf_ann, 0.0
182
  w_expo = w / gross
183
  beta_p = float(np.dot([betas.get(t, 0.0) for t in tickers], w_expo))
184
- mu_capm = capm_er(beta_p, rf_ann, erp_ann)
185
  cov = cov_ann.reindex(index=tickers, columns=tickers).fillna(0.0).to_numpy()
186
- sigma_hist = float(max(w_expo.T @ cov @ w_expo, 0.0)) ** 0.5
187
- return beta_p, mu_capm, sigma_hist
188
 
 
 
 
189
def efficient_same_sigma(sigma_target: float, rf_ann: float, erp_ann: float, sigma_mkt: float):
    """CML mix (market weight, bills weight, expected return) matching a target σ."""
    if sigma_mkt <= 1e-12:
        # Degenerate market volatility: everything goes to bills.
        return 0.0, 1.0, rf_ann
    mkt_w = sigma_target / sigma_mkt
    bills_w = 1.0 - mkt_w
    return mkt_w, bills_w, rf_ann + mkt_w * erp_ann
 
196
def efficient_same_return(mu_target: float, rf_ann: float, erp_ann: float, sigma_mkt: float):
    """CML mix (market weight, bills weight, sigma) matching a target expected return.

    Returns (a, b, sigma): weight `a` on the market, `b = 1 - a` on bills, and
    the volatility of that mix on the CML.
    """
    if abs(erp_ann) <= 1e-12:
        # Bug fix: the third tuple element is a *sigma*, not a return. With a
        # zero equity premium the answer is all-bills, whose volatility is 0.0
        # (previously this branch returned rf_ann in the sigma slot).
        return 0.0, 1.0, 0.0
    a = (mu_target - rf_ann) / erp_ann
    return a, 1.0 - a, abs(a) * sigma_mkt
201
 
202
- # -------------- plotting (CAPM on CML) --------------
203
- def _pct(x):
204
- return np.asarray(x, dtype=float) * 100.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
 
206
- def plot_cml(rf_ann, erp_ann, sigma_mkt, beta_p, mu_capm, sigma_capm, sugg_mu=None, sugg_sigma=None) -> Image.Image:
207
- fig = plt.figure(figsize=(6, 4), dpi=120)
 
208
 
209
- xmax = max(0.3, sigma_mkt * 2.2, (sigma_capm or 0.0) * 1.6, (sugg_sigma or 0.0) * 1.6)
210
- xs = np.linspace(0, xmax, 200)
211
- cml = rf_ann + (erp_ann / max(sigma_mkt, 1e-9)) * xs
212
 
213
- plt.plot(_pct(xs), _pct(cml), label="CML via Market", linewidth=1.8)
214
- # key points on CML (CAPM view)
215
- plt.scatter([_pct(0)], [_pct(rf_ann)], label="Risk-free")
216
- plt.scatter([_pct(sigma_mkt)], [_pct(rf_ann + erp_ann)], label="Market")
217
- plt.scatter([_pct(sigma_capm)], [_pct(mu_capm)], label="Your CAPM point", marker="o")
218
 
219
- if sugg_mu is not None and sugg_sigma is not None:
220
- plt.scatter([_pct(sugg_sigma)], [_pct(sugg_mu)], label="Selected Suggestion", marker="X", s=60)
 
 
 
 
 
221
 
222
- plt.xlabel("σ (annualized, %)")
223
- plt.ylabel("Expected return (annual, %)")
224
- plt.legend(loc="best")
225
  plt.tight_layout()
226
 
227
  buf = io.BytesIO()
@@ -230,138 +279,183 @@ def plot_cml(rf_ann, erp_ann, sigma_mkt, beta_p, mu_capm, sigma_capm, sugg_mu=No
230
  buf.seek(0)
231
  return Image.open(buf)
232
 
233
- # -------------- synthetic dataset (from current universe) --------------
234
def build_synthetic_dataset(universe: List[str],
                            covA: pd.DataFrame,
                            betas: Dict[str, float],
                            rf_ann: float,
                            erp_ann: float,
                            sigma_mkt: float,
                            n_rows: int = SYNTH_ROWS) -> pd.DataFrame:
    """Generate `n_rows` random long-only portfolios from `universe` with CAPM stats.

    Each row records the tickers, Dirichlet weights, portfolio beta, CAPM
    expected return, historical sigma (from `covA`), and the CAPM sigma
    (|beta| x market sigma). Seeded RNG makes the dataset reproducible.
    """
    rng = np.random.default_rng(12345)  # fixed seed for reproducible suggestions
    # NOTE(review): `assets` is computed but never used below — picks are drawn
    # from `universe` (which may include the market ticker). Confirm intent.
    assets = [t for t in universe if t != MARKET_TICKER]
    if not assets:
        assets = [MARKET_TICKER]

    rows = []
    for i in range(n_rows):
        # Portfolio size: 2..min(8, |universe|) names, chosen without replacement.
        k = int(rng.integers(low=2, high=min(8, len(universe)) + 1))
        picks = list(rng.choice(universe, size=k, replace=False))
        # long-only exposures sum to 1 (cleaner for presentation)
        w = rng.dirichlet(np.ones(k))
        # Portfolio beta is the exposure-weighted sum of asset betas.
        beta_p = float(np.dot([betas.get(t, 0.0) for t in picks], w))
        mu_capm = capm_er(beta_p, rf_ann, erp_ann)
        # historical sigma of that physical mix (not used on CML)
        sub = covA.reindex(index=picks, columns=picks).fillna(0.0).to_numpy()
        sigma_hist = float(max(w.T @ sub @ w, 0.0)) ** 0.5
        # CAPM sigma on CML for same expected return
        sigma_capm = abs(beta_p) * sigma_mkt

        rows.append({
            "tickers": ",".join(picks),
            "weights": ",".join(f"{x:.6f}" for x in w),
            "beta": beta_p,
            "mu_capm": mu_capm,
            "sigma_hist": sigma_hist,
            "sigma_capm": sigma_capm
        })
    return pd.DataFrame(rows)
269
-
270
- def _band_bounds(sigma_mkt: float, band: str) -> Tuple[float, float]:
271
- band = (band or "Medium").strip().lower()
272
- if band.startswith("low"):
273
- return 0.0, 0.8 * sigma_mkt
274
- if band.startswith("high"):
275
- return 1.2 * sigma_mkt, 3.0 * sigma_mkt
276
- # medium
277
- return 0.8 * sigma_mkt, 1.2 * sigma_mkt
278
-
279
- def top3_by_return_in_band(df: pd.DataFrame, band: str, sigma_mkt: float) -> pd.DataFrame:
280
- lo, hi = _band_bounds(sigma_mkt, band)
281
- pick = df[(df["sigma_capm"] >= lo) & (df["sigma_capm"] <= hi)].copy()
282
- if pick.empty:
283
- pick = df.copy()
284
- pick = pick.sort_values("mu_capm", ascending=False).head(3).reset_index(drop=True)
285
- pick.insert(0, "pick", [1, 2, 3][: len(pick)])
286
- return pick
287
-
288
- # -------------- optional: embeddings rerank --------------
289
def rerank_with_embeddings(top3: pd.DataFrame, band: str) -> pd.DataFrame:
    """Optionally reorder the top-3 picks by embedding similarity to a risk prompt.

    Any failure (missing sentence-transformers, model download error, etc.)
    returns `top3` unchanged — this refinement is strictly best-effort.
    """
    try:
        from sentence_transformers import SentenceTransformer
        model = SentenceTransformer("FinLang/finance-embeddings-investopedia")
        key = (band or "medium").lower()
        if key not in {"low", "medium", "high"}:
            key = "medium"
        prompt = {
            "low": "low risk conservative portfolio stable diversified market exposure",
            "medium": "balanced medium risk diversified portfolio",
            "high": "high risk growth aggressive portfolio higher expected return"
        }[key]

        cand_texts = [
            f"portfolio with tickers {r['tickers']} having beta {float(r['beta']):.2f}, "
            f"expected return {float(r['mu_capm']):.3f}, sigma {float(r['sigma_capm']):.3f}"
            for _, r in top3.iterrows()
        ]

        q = model.encode([prompt])
        c = model.encode(cand_texts)
        # cosine similarity of each candidate to the prompt
        sims = (q @ c.T) / (np.linalg.norm(q) * np.linalg.norm(c, axis=1, keepdims=False))
        order = np.argsort(-sims.ravel())
        return top3.iloc[order].reset_index(drop=True)
    except Exception:
        return top3
314
-
315
- # -------------- UI helpers --------------
316
def empty_positions_df():
    """Empty frame with the positions-table schema."""
    cols = ["ticker", "amount_usd", "weight_exposure", "beta"]
    return pd.DataFrame(columns=cols)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
318
 
319
def empty_suggestion_df():
    """Empty frame with the suggestion-table schema."""
    cols = ["ticker", "weight_%", "amount_$"]
    return pd.DataFrame(columns=cols)
321
 
322
def set_horizon(years: float):
    """Update the module-level horizon / risk-free globals; return a status string."""
    clamped = max(1.0, min(100.0, float(years)))
    series = fred_series_for_horizon(clamped)
    rate = fetch_fred_yield_annual(series)
    global HORIZON_YEARS, RF_CODE, RF_ANN
    HORIZON_YEARS = clamped
    RF_CODE = series
    RF_ANN = rate
    return f"Risk-free series {series}. Latest annual rate {rate:.2%}."
331
 
332
def search_tickers_cb(q: str):
    """Gradio callback: run a symbol search and refresh the Matches dropdown."""
    matches = yahoo_search(q)
    if matches:
        note = "Select a symbol and click 'Add selected to portfolio'."
    else:
        note = "No matches."
    return note, gr.update(choices=matches, value=None)
336
 
337
- def add_symbol(selection: str, table: Optional[pd.DataFrame]):
338
- if not selection:
339
- return table if isinstance(table, pd.DataFrame) else pd.DataFrame(columns=["ticker","amount_usd"]), "Pick a row in Matches first."
340
- symbol = selection.split("|")[0].strip().upper()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
341
 
342
  current = []
343
- if isinstance(table, pd.DataFrame) and not table.empty:
344
  current = [str(x).upper() for x in table["ticker"].tolist() if str(x) != "nan"]
 
345
  tickers = current if symbol in current else current + [symbol]
 
346
 
 
347
  val = validate_tickers(tickers, years=DEFAULT_LOOKBACK_YEARS)
348
  tickers = [t for t in tickers if t in val]
349
-
350
  amt_map = {}
351
- if isinstance(table, pd.DataFrame) and not table.empty:
352
  for _, r in table.iterrows():
353
  t = str(r.get("ticker", "")).upper()
354
  if t in tickers:
355
  amt_map[t] = float(pd.to_numeric(r.get("amount_usd", 0.0), errors="coerce") or 0.0)
356
 
357
  new_table = pd.DataFrame({"ticker": tickers, "amount_usd": [amt_map.get(t, 0.0) for t in tickers]})
 
358
  if len(new_table) > MAX_TICKERS:
359
  new_table = new_table.iloc[:MAX_TICKERS]
360
- return new_table, f"Reached max of {MAX_TICKERS}."
361
- return new_table, f"Added {symbol}."
362
 
363
- def lock_ticker_column(tb: Optional[pd.DataFrame]):
364
- if not isinstance(tb, pd.DataFrame) or tb.empty:
365
  return pd.DataFrame(columns=["ticker", "amount_usd"])
366
  tickers = [str(x).upper() for x in tb["ticker"].tolist()]
367
  amounts = pd.to_numeric(tb["amount_usd"], errors="coerce").fillna(0.0).tolist()
@@ -370,220 +464,318 @@ def lock_ticker_column(tb: Optional[pd.DataFrame]):
370
  amounts = amounts[:len(tickers)] + [0.0] * max(0, len(tickers) - len(amounts))
371
  return pd.DataFrame({"ticker": tickers, "amount_usd": amounts})
372
 
373
- # -------------- main compute --------------
374
- UNIVERSE: List[str] = [MARKET_TICKER, "QQQ", "VTI", "SOXX", "IBIT"]
375
-
376
- def compute(
377
- years_lookback: int,
378
- table: Optional[pd.DataFrame],
379
- risk_band: str,
380
- use_embeddings: bool,
381
- pick_idx: int
382
- ):
383
- # sanitize table
384
- if isinstance(table, pd.DataFrame):
385
- df = table.copy()
386
- else:
387
- df = pd.DataFrame(columns=["ticker", "amount_usd"])
388
- df = df.dropna(how="all")
389
- if "ticker" not in df.columns: df["ticker"] = []
390
- if "amount_usd" not in df.columns: df["amount_usd"] = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
391
  df["ticker"] = df["ticker"].astype(str).str.upper().str.strip()
392
  df["amount_usd"] = pd.to_numeric(df["amount_usd"], errors="coerce").fillna(0.0)
393
 
394
  symbols = [t for t in df["ticker"].tolist() if t]
 
395
  if len(symbols) == 0:
396
- return None, "Add at least one ticker.", "Universe empty.", empty_positions_df(), empty_suggestion_df(), None
397
-
398
- symbols = validate_tickers(symbols, years_lookback)
399
- if len(symbols) == 0:
400
- return None, "Could not validate any tickers.", "Universe invalid.", empty_positions_df(), empty_suggestion_df(), None
401
-
402
- global UNIVERSE
403
- UNIVERSE = list(sorted(set([s for s in symbols if s != MARKET_TICKER] + [MARKET_TICKER])))[:MAX_TICKERS]
404
-
405
  df = df[df["ticker"].isin(symbols)].copy()
406
  amounts = {r["ticker"]: float(r["amount_usd"]) for _, r in df.iterrows()}
407
- rf_ann = RF_ANN
408
-
409
- # Moments
410
- moms = estimate_all_moments_aligned(symbols, years_lookback, rf_ann)
 
 
 
 
 
 
 
 
 
411
  betas, covA, erp_ann, sigma_mkt = moms["betas"], moms["cov_ann"], moms["erp_ann"], moms["sigma_m_ann"]
412
 
413
- # Weights
414
- gross = sum(abs(v) for v in amounts.values())
415
- if gross <= 1e-12:
416
- return None, "All amounts are zero.", "Universe ok.", empty_positions_df(), empty_suggestion_df(), None
417
- weights = {k: v / gross for k, v in amounts.items()}
418
-
419
- # Portfolio CAPM stats
420
- beta_p, mu_capm, sigma_hist = portfolio_stats(weights, covA, betas, rf_ann, erp_ann)
421
- sigma_capm = abs(beta_p) * sigma_mkt
422
-
423
- # Efficient alternatives (using historical σ and CAPM μ for reference)
424
- a_sigma, b_sigma, mu_eff_sigma = efficient_same_sigma(sigma_hist, rf_ann, erp_ann, sigma_mkt)
425
- a_mu, b_mu, sigma_eff_mu = efficient_same_return(mu_capm, rf_ann, erp_ann, sigma_mkt)
426
-
427
- # Synthetic dataset & suggestions
428
- synth = build_synthetic_dataset(UNIVERSE, covA, betas, rf_ann, erp_ann, sigma_mkt, n_rows=SYNTH_ROWS)
429
- csv_path = os.path.join(DATA_DIR, f"investor_profiles_{int(time.time())}.csv")
430
- synth.to_csv(csv_path, index=False)
431
-
432
- top3 = top3_by_return_in_band(synth, risk_band, sigma_mkt)
433
- if use_embeddings:
434
- top3 = rerank_with_embeddings(top3, risk_band)
435
- if top3.empty:
436
- top3 = synth.sort_values("mu_capm", ascending=False).head(3).reset_index(drop=True)
437
- top3.insert(0, "pick", [1, 2, 3][: len(top3)])
438
-
439
- idx = max(1, min(3, int(pick_idx))) - 1
440
- row = top3.iloc[idx]
441
-
442
- sugg_mu = float(row["mu_capm"])
443
- sugg_sigma = float(row["sigma_capm"])
444
-
445
- # suggestion holdings (% and $)
446
- ts = [t.strip() for t in str(row["tickers"]).split(",")]
447
- ws = [float(x) for x in str(row["weights"]).split(",")]
448
- s = sum(ws) if ws else 1.0
449
- ws = [max(0.0, w) / s for w in ws]
450
- budget = gross if gross > 0 else 1.0
451
- sugg_table = pd.DataFrame(
452
- [{"ticker": t, "weight_%": round(w*100.0, 2), "amount_$": round(w*budget, 0)} for t, w in zip(ts, ws)],
453
- columns=["ticker", "weight_%", "amount_$"]
454
- )
455
-
456
- # positions table
457
- pos_table = pd.DataFrame(
458
- [{
459
- "ticker": t,
460
- "amount_usd": amounts.get(t, 0.0),
461
- "weight_exposure": weights.get(t, 0.0),
462
- "beta": 1.0 if t == MARKET_TICKER else betas.get(t, np.nan)
463
- } for t in symbols],
464
- columns=["ticker", "amount_usd", "weight_exposure", "beta"]
465
- )
466
-
467
- # plot
468
- img = plot_cml(rf_ann, erp_ann, sigma_mkt, beta_p, mu_capm, sigma_capm, sugg_mu, sugg_sigma)
469
-
470
- info = "\n".join([
471
- "### Inputs",
472
- f"- Lookback years {years_lookback}",
473
- f"- Horizon years {int(round(HORIZON_YEARS))}",
474
- f"- Risk-free {rf_ann:.2%} from {RF_CODE}",
475
- f"- Market ERP {erp_ann:.2%}",
476
- f"- Market σ {sigma_mkt:.2%}",
477
- "",
478
- "### Your portfolio (CAPM)",
479
- f"- Beta {beta_p:.2f}",
480
- f"- Expected return (CAPM / SML) {mu_capm:.2%}",
481
- f"- on CML for your beta (|β|×σ_mkt) {sigma_capm:.2%}",
482
- "",
483
- "### Efficient alternatives on CML",
484
- f"- Same σ as your portfolio (historical): Market weight {a_sigma:.2f}, Bills weight {b_sigma:.2f}, return {mu_eff_sigma:.2%}",
485
- f"- Same return (CAPM): Market weight {a_mu:.2f}, Bills weight {b_mu:.2f}, σ {sigma_eff_mu:.2%}",
486
- "",
487
- "### Dataset-based suggestions (risk: " + risk_band + ")",
488
- f"- Use the carousel to flip between **Pick #1 / #2 / #3**.",
489
- f"- Showing Pick **#{idx+1}** → CAPM return {sugg_mu:.2%}, CAPM σ {sugg_sigma:.2%}",
490
- "",
491
- "_Plot shows CAPM expectations on the CML (not historical means)._"
492
- ])
493
-
494
- uni_msg = f"Universe set to: {', '.join(UNIVERSE)}"
495
- return img, info, uni_msg, pos_table, sugg_table, csv_path, gr.update(label=f"Pick #{idx+1} of 3")
496
-
497
- # -------------- UI --------------
498
def inc_pick(i: int):
    """Advance the suggestion-carousel index, clamped to 1..3."""
    current = int(i or 1)
    return min(3, max(1, current + 1))

def dec_pick(i: int):
    """Step the suggestion-carousel index back, clamped to 1..3."""
    current = int(i or 1)
    return max(1, min(3, current - 1))
500
-
501
- with gr.Blocks(title="Efficient Portfolio Advisor") as demo:
502
- gr.Markdown(
503
- "## Efficient Portfolio Advisor\n"
504
- "Search symbols, enter **dollar amounts**, set horizon. Returns use Yahoo Finance monthly data; risk-free from FRED. "
505
- "Plot shows **CAPM point on the CML** plus efficient CML points."
506
- )
507
-
508
- with gr.Row():
509
- with gr.Column(scale=1):
510
- q = gr.Textbox(label="Search symbol")
511
- search_note = gr.Markdown()
512
- matches = gr.Dropdown(choices=[], label="Matches")
513
- search_btn = gr.Button("Search")
514
- add_btn = gr.Button("Add selected to portfolio")
515
-
516
- gr.Markdown("### Portfolio positions (enter $ amounts; negatives allowed for shorts)")
517
- table = gr.Dataframe(
518
- headers=["ticker", "amount_usd"],
519
- datatype=["str", "number"],
520
- row_count=0,
521
- col_count=(2, "fixed")
522
- )
523
-
524
- horizon = gr.Number(label="Horizon in years (1–100)", value=HORIZON_YEARS, precision=0)
525
- lookback = gr.Slider(1, 15, value=DEFAULT_LOOKBACK_YEARS, step=1, label="Lookback years for betas & covariances")
526
-
527
- gr.Markdown("### Suggestions")
528
- risk_band = gr.Radio(["Low", "Medium", "High"], value="Medium", label="Risk tolerance")
529
- use_emb = gr.Checkbox(value=True, label="Use finance embeddings to refine picks")
530
-
531
- with gr.Row():
532
- prev_btn = gr.Button("◀ Prev")
533
- pick_idx = gr.Number(value=1, precision=0, label="Carousel")
534
- next_btn = gr.Button("Next ▶")
535
-
536
- run_btn = gr.Button("Compute (build dataset & suggest)")
537
- with gr.Column(scale=1):
538
- plot = gr.Image(label="Capital Market Line (CAPM)", type="pil")
539
- summary = gr.Markdown(label="Inputs & Results")
540
- universe_msg = gr.Textbox(label="Universe status", interactive=False)
541
- positions = gr.Dataframe(
542
- label="Computed positions",
543
- headers=["ticker", "amount_usd", "weight_exposure", "beta"],
544
- datatype=["str", "number", "number", "number"],
545
- col_count=(4, "fixed"),
546
- value=empty_positions_df(),
547
- interactive=False
548
- )
549
- sugg_table = gr.Dataframe(
550
- label="Selected suggestion (carousel) — holdings shown in % and $",
551
- headers=["ticker", "weight_%", "amount_$"],
552
- datatype=["str", "number", "number"],
553
- col_count=(3, "fixed"),
554
- value=empty_suggestion_df(),
555
- interactive=False
556
- )
557
- dl = gr.File(label="Generated dataset CSV", value=None, visible=True)
558
-
559
- # wire search / add / locking / horizon
560
- search_btn.click(fn=search_tickers_cb, inputs=q, outputs=[search_note, matches])
561
- add_btn.click(fn=add_symbol, inputs=[matches, table], outputs=[table, search_note])
562
- table.change(fn=lock_ticker_column, inputs=table, outputs=table)
563
- horizon.change(fn=set_horizon, inputs=horizon, outputs=universe_msg)
564
-
565
- # carousel buttons update pick index and then recompute
566
- prev_btn.click(fn=dec_pick, inputs=pick_idx, outputs=pick_idx).then(
567
- fn=compute,
568
- inputs=[lookback, table, risk_band, use_emb, pick_idx],
569
- outputs=[plot, summary, universe_msg, positions, sugg_table, dl, pick_idx]
570
- )
571
- next_btn.click(fn=inc_pick, inputs=pick_idx, outputs=pick_idx).then(
572
- fn=compute,
573
- inputs=[lookback, table, risk_band, use_emb, pick_idx],
574
- outputs=[plot, summary, universe_msg, positions, sugg_table, dl, pick_idx]
575
- )
576
 
577
- # main compute
578
- run_btn.click(
579
- fn=compute,
580
- inputs=[lookback, table, risk_band, use_emb, pick_idx],
581
- outputs=[plot, summary, universe_msg, positions, sugg_table, dl, pick_idx]
 
582
  )
583
 
584
- # initialize risk-free at launch
585
- RF_CODE = fred_series_for_horizon(HORIZON_YEARS)
586
- RF_ANN = fetch_fred_yield_annual(RF_CODE)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
587
 
588
  if __name__ == "__main__":
589
  demo.launch()
 
1
+ import os, io, math, json, warnings
 
2
  warnings.filterwarnings("ignore")
3
 
4
  from typing import List, Tuple, Dict, Optional
 
7
  import pandas as pd
8
  import matplotlib.pyplot as plt
9
  from PIL import Image
10
+ import gradio as gr
11
  import requests
12
  import yfinance as yf
 
13
 
14
+ from sentence_transformers import SentenceTransformer, util as st_util
15
+ from sklearn.preprocessing import StandardScaler
16
+ from sklearn.neighbors import KNeighborsRegressor
17
+
18
+ # =========================
19
+ # Config
20
+ # =========================
21
  DATA_DIR = "data"
22
  os.makedirs(DATA_DIR, exist_ok=True)
23
 
24
+ DEFAULT_LOOKBACK_YEARS = 5
25
  MAX_TICKERS = 30
26
+ MARKET_TICKER = "VOO" # proxy for market portfolio
27
+ BILLS_TICKER = "BILLS" # synthetic cash / T-Bills bucket
28
+
29
+ EMBED_MODEL_NAME = "BAAI/bge-base-en-v1.5" # fully local, no API keys
30
+
31
+ POS_COLS = ["ticker", "amount_usd", "weight_exposure", "beta"]
32
+ SUG_COLS = ["ticker", "weight_%", "amount_$"]
33
+ EFF_COLS = ["asset", "weight_%", "amount_$"]
34
+
35
+ N_SYNTH = 1000 # size of synthetic dataset per run
36
+ MMR_K = 40 # shortlist size before MMR
37
+ MMR_LAMBDA = 0.65 # similarity vs diversity tradeoff
38
+
39
+ # ---------------- FRED mapping (risk-free source) ----------------
40
# Horizon cutoffs (years) mapped to FRED constant-maturity Treasury series.
FRED_MAP = [
    (1, "DGS1"),
    (2, "DGS2"),
    (3, "DGS3"),
    (5, "DGS5"),
    (7, "DGS7"),
    (10, "DGS10"),
    (20, "DGS20"),
    (30, "DGS30"),
    (100, "DGS30"),
]

def fred_series_for_horizon(years: float) -> str:
    """Pick the FRED yield series whose tenor covers the (clamped) horizon."""
    horizon = max(1.0, min(100.0, float(years)))
    # First cutoff at or above the horizon wins; DGS30 is the safety net.
    return next((code for cutoff, code in FRED_MAP if horizon <= cutoff), "DGS30")
58
 
59
  def fetch_fred_yield_annual(code: str) -> float:
60
+ # FRED CSV endpoint (no API key required). Fallback to 3% if it fails.
61
  url = f"https://fred.stlouisfed.org/graph/fredgraph.csv?id={code}"
62
  try:
63
  r = requests.get(url, timeout=10)
 
68
  except Exception:
69
  return 0.03
70
 
71
+ # =========================
72
+ # Data helpers
73
+ # =========================
74
+ def _to_cols_close(df: pd.DataFrame) -> pd.DataFrame:
75
+ """Coerce yfinance download to a single-level columns DataFrame of adjusted closes."""
76
+ if df is None or df.empty:
77
+ return pd.DataFrame()
78
+ # yfinance returns:
79
+ # - Series if single ticker;
80
+ # - DataFrame w/ single-level columns if single ticker but group_by==None;
81
+ # - MultiIndex columns (ticker -> field) if multiple tickers.
 
 
 
 
 
 
 
82
  if isinstance(df, pd.Series):
83
+ df = df.to_frame("Close")
84
  if isinstance(df.columns, pd.MultiIndex):
85
+ # Prefer "Adj Close" if available, else "Close"
86
+ level0 = df.columns.get_level_values(0).unique().tolist()
87
+ fields = df.columns.get_level_values(1).unique().tolist()
88
+ field = "Adj Close" if "Adj Close" in fields else ("Close" if "Close" in fields else fields[0])
89
+ out = {}
90
+ for t in level0:
91
+ col = (t, field)
92
+ if col in df.columns:
93
+ out[t] = df[col]
94
+ out_df = pd.DataFrame(out)
95
+ return out_df
96
  else:
97
+ # Single ticker. Column could be "Close" already.
98
+ if "Adj Close" in df.columns:
99
+ return df[["Adj Close"]].rename(columns={"Adj Close": "SINGLE"})
100
+ if "Close" in df.columns:
101
+ return df[["Close"]].rename(columns={"Close": "SINGLE"})
102
+ # Fallback: use first numeric column
103
+ num_cols = [c for c in df.columns if pd.api.types.is_numeric_dtype(df[c])]
104
+ if num_cols:
105
+ return df[[num_cols[0]]].rename(columns={num_cols[0]: "SINGLE"})
106
+ return pd.DataFrame()
107
 
108
def fetch_prices_monthly(tickers: List[str], years: int) -> pd.DataFrame:
    """Download `years` of monthly auto-adjusted closes, one column per ticker.

    Tickers are deduplicated before download; the normalized frame is
    forward-filled and all-NaN rows are dropped. Empty on download failure.
    """
    # A week of cushion so the earliest monthly bar is complete.
    start = (pd.Timestamp.today(tz="UTC") - pd.DateOffset(years=int(years), days=7)).date()
    end = pd.Timestamp.today(tz="UTC").date()
    df_raw = yf.download(
        list(dict.fromkeys(tickers)),
        start=start, end=end,
        interval="1mo", auto_adjust=True, progress=False, group_by="ticker",
        threads=True,
    )
    df = _to_cols_close(df_raw).copy()
    if df.empty:
        return df
    # If single series, rename to the single ticker name
    if df.shape[1] == 1 and "SINGLE" in df.columns:
        df.columns = [tickers[0]]
    # Fix: fillna(method="ffill") is deprecated (removed in pandas >= 3.0);
    # use the dedicated .ffill() method instead.
    df = df.dropna(how="all").ffill()
    return df
125
 
126
  def monthly_returns(prices: pd.DataFrame) -> pd.DataFrame:
127
  return prices.pct_change().dropna()
128
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
def validate_tickers(symbols: List[str], years: int) -> List[str]:
    """Return subset of symbols that have enough data over lookback."""
    cleaned = [s.strip().upper() for s in symbols if s and isinstance(s, str)]
    # Always request the market proxy alongside the user's symbols.
    non_market = [s for s in cleaned if s != MARKET_TICKER]
    px = fetch_prices_monthly(non_market + [MARKET_TICKER], years)
    return [s for s in cleaned if s in px.columns]
139
 
140
+ # =========================
141
+ # Moments & CAPM
142
+ # =========================
143
def annualize_mean(m):
    """Scale a monthly mean return to annual terms (x12)."""
    return np.asarray(m, dtype=float) * 12.0

def annualize_sigma(s):
    """Scale a monthly volatility to annual terms (x sqrt(12))."""
    return np.asarray(s, dtype=float) * math.sqrt(12.0)
145
+
146
def get_aligned_monthly_returns(symbols: List[str], years: int) -> pd.DataFrame:
    """Aligned monthly returns for the requested symbols plus the market proxy."""
    ordered = list(dict.fromkeys(symbols))
    if MARKET_TICKER not in ordered:
        ordered.append(MARKET_TICKER)
    prices = fetch_prices_monthly(ordered, years)
    rets = monthly_returns(prices)
    present = [c for c in ordered if c in rets.columns]
    aligned = rets[present].dropna(how="any")
    # Guard against duplicated columns from overlapping ticker requests.
    return aligned.loc[:, ~aligned.columns.duplicated()]
155
 
156
  def estimate_all_moments_aligned(symbols: List[str], years: int, rf_ann: float):
157
+ R = get_aligned_monthly_returns(symbols + [MARKET_TICKER], years)
158
+ if MARKET_TICKER not in R.columns or R.shape[0] < 3:
159
+ raise ValueError("Not enough aligned data to estimate moments.")
160
  rf_m = rf_ann / 12.0
161
 
162
  m = R[MARKET_TICKER]
163
  if isinstance(m, pd.DataFrame):
164
  m = m.iloc[:, 0].squeeze()
165
 
166
+ mu_m_ann = float(annualize_mean(m.mean()))
167
+ sigma_m_ann = float(annualize_sigma(m.std(ddof=1)))
168
+ erp_ann = float(mu_m_ann - rf_ann)
169
 
170
  ex_m = m - rf_m
171
  var_m = float(np.var(ex_m.values, ddof=1))
 
177
  cov_sm = float(np.cov(ex_s.values, ex_m.values, ddof=1)[0, 1])
178
  betas[s] = cov_sm / var_m
179
 
180
+ betas[MARKET_TICKER] = 1.0 # by definition
181
 
182
  asset_cols = [c for c in R.columns if c != MARKET_TICKER]
183
  cov_m = np.cov(R[asset_cols].values.T, ddof=1) if asset_cols else np.zeros((0, 0))
 
200
  return 0.0, rf_ann, 0.0
201
  w_expo = w / gross
202
  beta_p = float(np.dot([betas.get(t, 0.0) for t in tickers], w_expo))
203
+ er_capm = capm_er(beta_p, rf_ann, erp_ann)
204
  cov = cov_ann.reindex(index=tickers, columns=tickers).fillna(0.0).to_numpy()
205
+ sigma_p = math.sqrt(max(float(w_expo.T @ cov @ w_expo), 0.0))
206
+ return beta_p, er_capm, sigma_p
207
 
208
+ # =========================
209
+ # Efficient (CML) alternatives
210
+ # =========================
211
  def efficient_same_sigma(sigma_target: float, rf_ann: float, erp_ann: float, sigma_mkt: float):
212
+ """Weights (a on Market, b on Bills) and expected return on CML with same sigma."""
213
  if sigma_mkt <= 1e-12:
214
  return 0.0, 1.0, rf_ann
215
  a = sigma_target / sigma_mkt
216
  return a, 1.0 - a, rf_ann + a * erp_ann
217
 
218
def efficient_same_return(mu_target: float, rf_ann: float, erp_ann: float, sigma_mkt: float):
    """Return (market_weight, bills_weight, sigma) for the CML mix whose
    expected return equals ``mu_target``.

    Solving rf + a * erp = mu_target gives a = (mu_target - rf) / erp; the
    resulting volatility is |a| * sigma_mkt (|a| because a can be negative).
    """
    # No equity risk premium: the CML is flat, so no leverage level can
    # change the return — fall back to all bills.
    if abs(erp_ann) <= 1e-12:
        return 0.0, 1.0, 0.0
    market_w = (mu_target - rf_ann) / erp_ann
    return market_w, 1.0 - market_w, abs(market_w) * sigma_mkt
224
 
225
+ # =========================
226
+ # Plot
227
+ # =========================
228
+ def _pct_arr(x):
229
+ x = np.asarray(x, dtype=float)
230
+ return x * 100.0
231
+
232
+ def plot_cml(
233
+ rf_ann, erp_ann, sigma_mkt,
234
+ pt_sigma_hist, pt_mu_capm,
235
+ same_sigma_sigma, same_sigma_mu,
236
+ same_mu_sigma, same_mu_mu,
237
+ ) -> Image.Image:
238
+ fig = plt.figure(figsize=(6.6, 4.4), dpi=130)
239
+
240
+ xmax = max(
241
+ 0.3,
242
+ sigma_mkt * 2.0,
243
+ pt_sigma_hist * 1.4,
244
+ same_mu_sigma * 1.4,
245
+ same_sigma_sigma * 1.4,
246
+ )
247
+
248
+ xs = np.linspace(0, xmax, 160)
249
+ slope = erp_ann / max(sigma_mkt, 1e-12)
250
+ cml = rf_ann + slope * xs
251
 
252
+ plt.plot(_pct_arr(xs), _pct_arr(cml), label="CML via VOO", linewidth=1.8)
253
+ plt.scatter([0.0], [_pct_arr(rf_ann)], label="Risk-free", zorder=5)
254
+ plt.scatter([_pct_arr(sigma_mkt)], [_pct_arr(rf_ann + erp_ann)], label="Market (VOO)", zorder=5)
255
 
256
+ # Your portfolio point uses CAPM expected return + historical sigma
257
+ plt.scatter([_pct_arr(pt_sigma_hist)], [_pct_arr(pt_mu_capm)], label="Your portfolio (CAPM)", zorder=6)
 
258
 
259
+ # Efficient matches
260
+ plt.scatter([_pct_arr(same_sigma_sigma)], [_pct_arr(same_sigma_mu)], label="Efficient: same σ", zorder=5)
261
+ plt.scatter([_pct_arr(same_mu_sigma)], [_pct_arr(same_mu_mu)], label="Efficient: same μ", zorder=5)
 
 
262
 
263
+ # helper guides
264
+ plt.plot([_pct_arr(pt_sigma_hist), _pct_arr(same_sigma_sigma)],
265
+ [_pct_arr(pt_mu_capm), _pct_arr(same_sigma_mu)],
266
+ ls="--", lw=1.1, alpha=0.7, color="gray")
267
+ plt.plot([_pct_arr(pt_sigma_hist), _pct_arr(same_mu_sigma)],
268
+ [_pct_arr(pt_mu_capm), _pct_arr(same_mu_mu)],
269
+ ls="--", lw=1.1, alpha=0.7, color="gray")
270
 
271
+ plt.xlabel("σ (annual, %)")
272
+ plt.ylabel("E[return] (annual, %)")
273
+ plt.legend(loc="best", fontsize=8)
274
  plt.tight_layout()
275
 
276
  buf = io.BytesIO()
 
279
  buf.seek(0)
280
  return Image.open(buf)
281
 
282
+ # =========================
283
+ # Synthetic dataset (for recommendations)
284
+ # =========================
285
def dirichlet_signed(k, rng):
    """Draw ``k`` signed exposure weights from ``rng``.

    Magnitudes come from a flat Dirichlet (they sum to 1), each weight is
    flipped short with probability 0.25, and the whole vector is scaled by a
    random gross leverage of 1 + Gamma(2, 0.5) (always > 1).
    """
    # NOTE: the three draws below must stay in this order to keep the
    # stream of a seeded generator reproducible.
    sign_draw = rng.choice([-1.0, 1.0], size=k, p=[0.25, 0.75])
    magnitudes = rng.dirichlet(np.ones(k))
    leverage = 1.0 + float(rng.gamma(2.0, 0.5))
    return leverage * sign_draw * magnitudes
290
+
291
def build_synth_dataset(universe: List[str],
                        cov_ann: pd.DataFrame,
                        betas: Dict[str, float],
                        rf_ann: float, erp_ann: float,
                        n_rows: int = N_SYNTH,
                        seed: int = 123) -> pd.DataFrame:
    """Generate up to ``n_rows`` random portfolios drawn from ``universe``.

    Each output row stores the picked tickers, their exposure weights
    normalized to gross exposure 1 (shorts allowed), and the portfolio's
    beta, CAPM expected return, and sigma as computed by ``portfolio_stats``.
    Seeded with ``seed`` so the dataset is reproducible across runs.
    """
    rng = np.random.default_rng(seed)
    # Candidate pool keeps the market ticker, placed last.
    U = [u for u in universe if u != MARKET_TICKER] + [MARKET_TICKER]
    rows = []
    for i in range(n_rows):
        # Portfolio size: between 2 and 8 names, capped by the pool size.
        k = rng.integers(low=min(2, len(U)), high=min(8, len(U)) + 1)
        picks = list(rng.choice(U, size=k, replace=False))
        w = dirichlet_signed(k, rng)  # exposure weights (can include short)
        gross = float(np.sum(np.abs(w)))
        if gross <= 1e-12:
            # Degenerate draw — skipped, so the result may have < n_rows rows.
            continue
        w_expo = w / gross
        weights = {picks[j]: float(w_expo[j]) for j in range(k)}
        beta_i, er_capm_i, sigma_i = portfolio_stats(weights, cov_ann, betas, rf_ann, erp_ann)

        rows.append({
            "id": int(i),
            "tickers": ",".join(picks),
            "weights": ",".join(f"{x:.6f}" for x in w_expo),
            "beta": float(beta_i),
            "er_capm": float(er_capm_i),
            "sigma": float(sigma_i),
        })
    df = pd.DataFrame(rows)
    return df
320
+
321
+ # =========================
322
+ # Embeddings + MMR selection
323
+ # =========================
324
# Lazily-created module-level singleton so the embedding model is loaded at
# most once per process.
_embedder = None
def get_embedder():
    """Return the shared SentenceTransformer, loading it on first use."""
    global _embedder
    if _embedder is None:
        _embedder = SentenceTransformer(EMBED_MODEL_NAME)
    return _embedder
330
+
331
def row_to_sentence(row: pd.Series) -> str:
    """Render one synthetic-portfolio row as the text fed to the embedder.

    Expects the row to carry ``tickers``/``weights`` as comma-separated
    strings plus numeric ``sigma``, ``er_capm`` and ``beta`` fields.
    """
    try:
        tickers = row["tickers"].split(",")
        weights = [float(w) for w in row["weights"].split(",")]
        # zip truncates to the shorter list, matching the original pairing.
        pairs = ", ".join(f"{t} {w:+.2f}" for t, w in zip(tickers, weights))
    except Exception:
        pairs = ""  # malformed row: fall back to the stats-only description
    return (f"portfolio with sigma {row['sigma']:.4f}, "
            f"capm_return {row['er_capm']:.4f}, "
            f"beta {row['beta']:.3f}, "
            f"exposures {pairs}")
342
+
343
def mmr_select(query_emb: np.ndarray,
               cand_embs: np.ndarray,
               k: int = 3,
               lambda_param: float = MMR_LAMBDA) -> List[int]:
    """
    Maximal Marginal Relevance: pick k diverse-yet-relevant indices.

    lambda_param trades off relevance (1.0 = pure similarity to the query)
    against diversity (0.0 = maximally different from already-chosen items).
    Returns indices into cand_embs; if there are at most k candidates, all
    of them are returned.

    NOTE(review): despite the np.ndarray annotations, the ``.cpu()`` calls
    imply the embeddings are torch tensors (as returned by
    SentenceTransformer.encode(convert_to_tensor=True)) — confirm.
    """
    if cand_embs.shape[0] <= k:
        return list(range(cand_embs.shape[0]))
    sim_to_query = st_util.cos_sim(query_emb, cand_embs).cpu().numpy().reshape(-1)
    chosen = []
    candidate_indices = list(range(cand_embs.shape[0]))
    # pick the most similar first
    first = int(np.argmax(sim_to_query))
    chosen.append(first)
    candidate_indices.remove(first)
    # Greedily add the candidate maximizing: lambda*relevance - (1-lambda)*
    # (max similarity to anything already chosen).
    while len(chosen) < k and candidate_indices:
        max_score = -1e9
        max_idx = candidate_indices[0]
        for idx in candidate_indices:
            sim_q = sim_to_query[idx]
            sim_d = max(st_util.cos_sim(cand_embs[idx], cand_embs[chosen]).cpu().numpy().reshape(-1))
            mmr_score = lambda_param * sim_q - (1.0 - lambda_param) * sim_d
            if mmr_score > max_score:
                max_score = mmr_score
                max_idx = idx
        chosen.append(max_idx)
        candidate_indices.remove(max_idx)
    return chosen
372
+
373
+ # =========================
374
+ # Yahoo symbol search (for UX)
375
+ # =========================
376
def yahoo_search(query: str):
    """Look up symbols on Yahoo Finance's search endpoint.

    Returns up to 10 strings formatted "SYMBOL | name | exchange". On any
    failure (network, HTTP error, bad JSON) or when nothing matches, falls
    back to echoing the typed query as a symbol so the UI always has
    something selectable. An empty/blank query yields [].
    """
    if not query or len(query.strip()) == 0:
        return []
    term = query.strip()
    fallback = [f"{term.upper()} | typed symbol | n/a"]
    try:
        resp = requests.get(
            "https://query1.finance.yahoo.com/v1/finance/search",
            params={"q": term, "quotesCount": 10, "newsCount": 0},
            headers={"User-Agent": "Mozilla/5.0"},
            timeout=10,
        )
        resp.raise_for_status()
        results = []
        for quote in resp.json().get("quotes", []):
            sym = quote.get("symbol")
            name = quote.get("shortname") or quote.get("longname") or ""
            exch = quote.get("exchDisp") or ""
            # Skip non-ASCII symbols (keeps the dropdown clean).
            if sym and sym.isascii():
                results.append(f"{sym} | {name} | {exch}")
        return (results or fallback)[:10]
    except Exception:
        return fallback
398
 
399
_last_matches = []  # most recent yahoo_search results; refreshed by do_search on each search
 
400
 
401
+ # =========================
402
+ # Formatting helpers
403
+ # =========================
404
def fmt_pct(x: float) -> str:
    """Format a fraction as a percentage string, e.g. 0.0375 -> '3.75%'."""
    return "{:.2f}%".format(x * 100)
 
 
 
 
406
 
407
def fmt_money(x: float) -> str:
    """Format dollars with thousands separators and no cents, e.g. 1234.0 -> '$1,234'."""
    return "${:,.0f}".format(x)
 
 
409
 
410
+ # =========================
411
+ # Gradio callbacks
412
+ # =========================
413
# Module-level defaults for the investment horizon and the matching
# risk-free series/rate. These are mutated later by set_horizon().
# NOTE(review): fetch_fred_yield_annual issues an HTTP request at import
# time (it falls back to 0.03 on failure).
HORIZON_YEARS = 5.0
RF_CODE = fred_series_for_horizon(HORIZON_YEARS)  # FRED tenor for the horizon
RF_ANN = fetch_fred_yield_annual(RF_CODE)  # latest annual yield as a fraction
416
+
417
def do_search(query):
    """Search Yahoo for ``query`` and refresh the Matches dropdown.

    Side effect: stores the results in the module-level ``_last_matches``.
    """
    global _last_matches
    results = yahoo_search(query)
    _last_matches = results
    hint = "Select a symbol from Matches, then click Add."
    return hint, gr.update(choices=results, value=None)
422
+
423
def add_symbol(selection: str, table: pd.DataFrame):
    """Append the symbol picked in the Matches dropdown to the positions table.

    ``selection`` is either a "SYM | name | exchange" string from the search
    results or a raw typed symbol. Existing dollar amounts are preserved;
    new symbols start at 0.0. Returns (updated table, status message).
    """
    # Parse symbol from the dropdown selection. If selection not in choices, try to extract ticker anyway.
    if selection and " | " in selection:
        symbol = selection.split(" | ")[0].strip().upper()
    elif isinstance(selection, str) and selection.strip():
        symbol = selection.strip().upper()
    else:
        return table, "Pick a row from Matches first."

    current = []
    if table is not None and len(table) > 0:
        current = [str(x).upper() for x in table["ticker"].tolist() if str(x) != "nan"]

    # Avoid duplicates: only append when the symbol is not already present.
    tickers = current if symbol in current else current + [symbol]
    tickers = [t for t in tickers if t]  # clean empties

    # Validate using price availability
    val = validate_tickers(tickers, years=DEFAULT_LOOKBACK_YEARS)
    tickers = [t for t in tickers if t in val]
    # Keep existing amounts where available
    amt_map = {}
    if table is not None and len(table) > 0:
        for _, r in table.iterrows():
            t = str(r.get("ticker", "")).upper()
            if t in tickers:
                # NOTE(review): NaN is truthy, so `NaN or 0.0` keeps NaN here
                # rather than substituting 0.0 — confirm this is intended.
                amt_map[t] = float(pd.to_numeric(r.get("amount_usd", 0.0), errors="coerce") or 0.0)

    new_table = pd.DataFrame({"ticker": tickers, "amount_usd": [amt_map.get(t, 0.0) for t in tickers]})
    msg = f"Added {symbol}" if symbol in tickers else f"{symbol} not valid or no data"
    if len(new_table) > MAX_TICKERS:
        new_table = new_table.iloc[:MAX_TICKERS]
        msg = f"Reached max of {MAX_TICKERS}"
    return new_table, msg
456
 
457
+ def lock_ticker_column(tb: pd.DataFrame):
458
+ if tb is None or len(tb) == 0:
459
  return pd.DataFrame(columns=["ticker", "amount_usd"])
460
  tickers = [str(x).upper() for x in tb["ticker"].tolist()]
461
  amounts = pd.to_numeric(tb["amount_usd"], errors="coerce").fillna(0.0).tolist()
 
464
  amounts = amounts[:len(tickers)] + [0.0] * max(0, len(tickers) - len(amounts))
465
  return pd.DataFrame({"ticker": tickers, "amount_usd": amounts})
466
 
467
def set_horizon(years: float):
    """Clamp the horizon to [1, 100] years, refresh the matching FRED
    risk-free series and its latest rate, store both in module globals, and
    return a status message plus the annual rate."""
    global HORIZON_YEARS, RF_CODE, RF_ANN
    clamped = min(100.0, max(1.0, float(years)))
    series = fred_series_for_horizon(clamped)
    rate = fetch_fred_yield_annual(series)
    HORIZON_YEARS, RF_CODE, RF_ANN = clamped, series, rate
    return f"Risk-free series {series}. Latest annual rate {rate:.2%}. Computations will use this.", rate
476
+
477
def _table_from_weights(weights: Dict[str, float], gross_amt: float) -> pd.DataFrame:
    """Turn a {ticker: exposure weight} mapping into a display table with
    percentage and dollar columns, sorted by absolute weight (largest first)."""
    records = [
        {
            "ticker": ticker,
            "weight_%": round(float(w) * 100.0, 2),
            "amount_$": round(float(w) * gross_amt, 2),
        }
        for ticker, w in weights.items()
    ]
    out = pd.DataFrame(records, columns=SUG_COLS)
    # Order by |weight| via a temporary helper column, then drop it.
    out["absw"] = out["weight_%"].abs()
    return out.sort_values("absw", ascending=False).drop(columns=["absw"])
488
+
489
+ def _weights_dict_from_row(r: pd.Series) -> Dict[str, float]:
490
+ ts = [t.strip().upper() for t in str(r["tickers"]).split(",")]
491
+ ws = [float(x) for x in str(r["weights"]).split(",")]
492
+ wmap = {}
493
+ for i in range(min(len(ts), len(ws))):
494
+ wmap[ts[i]] = ws[i]
495
+ # normalize to gross 1
496
+ gross = sum(abs(v) for v in wmap.values())
497
+ if gross <= 1e-12:
498
+ return {}
499
+ return {k: v / gross for k, v in wmap.items()}
500
+
501
def compute(lookback_years: int,
            table: Optional[pd.DataFrame],
            risk_bucket: str,
            horizon_years: float):
    """Main pipeline behind the Compute button.

    Validates the user's positions, estimates CAPM/covariance moments over
    ``lookback_years``, builds the CML plot, derives the two efficient
    (same-sigma / same-mu) alternatives, and generates three diverse
    embedding-ranked suggestions from a synthetic dataset.

    Returns a 12-tuple matching the Gradio outputs wiring:
    (plot image, summary markdown, universe status, positions table,
     suggestion tables 1-3, efficient same-sigma table, efficient same-mu
     table, suggestion metadata JSON, current pick index, current pick msg).
    Fixes vs. previous revision: removed the unused ``current_tbl`` local and
    the unreachable ``else 1.0`` branch in the positions-table loop (the
    market ticker is already skipped by ``continue``).
    """
    # --- sanitize input table
    if table is None or len(table) == 0:
        return (None, "Add at least one ticker", "", pd.DataFrame(columns=POS_COLS),
                pd.DataFrame(columns=SUG_COLS), pd.DataFrame(columns=SUG_COLS),
                pd.DataFrame(columns=SUG_COLS), pd.DataFrame(columns=EFF_COLS),
                pd.DataFrame(columns=EFF_COLS), json.dumps([]), 1, "No suggestions yet.")

    df = table.copy().dropna()
    df["ticker"] = df["ticker"].astype(str).str.upper().str.strip()
    df["amount_usd"] = pd.to_numeric(df["amount_usd"], errors="coerce").fillna(0.0)

    symbols = [t for t in df["ticker"].tolist() if t]
    symbols = validate_tickers(symbols, lookback_years)
    if len(symbols) == 0:
        return (None, "Could not validate any tickers", "Universe invalid",
                pd.DataFrame(columns=POS_COLS),
                pd.DataFrame(columns=SUG_COLS), pd.DataFrame(columns=SUG_COLS),
                pd.DataFrame(columns=SUG_COLS), pd.DataFrame(columns=EFF_COLS),
                pd.DataFrame(columns=EFF_COLS), json.dumps([]), 1, "No suggestions.")

    # --- universe & amounts (market ticker always included for beta/ERP)
    universe = sorted(set([s for s in symbols if s != MARKET_TICKER] + [MARKET_TICKER]))
    df = df[df["ticker"].isin(symbols)].copy()
    amounts = {r["ticker"]: float(r["amount_usd"]) for _, r in df.iterrows()}
    gross_amt = sum(abs(v) for v in amounts.values())
    if gross_amt <= 1e-9:
        return (None, "All amounts are zero", "Universe ok", pd.DataFrame(columns=POS_COLS),
                pd.DataFrame(columns=SUG_COLS), pd.DataFrame(columns=SUG_COLS),
                pd.DataFrame(columns=SUG_COLS), pd.DataFrame(columns=EFF_COLS),
                pd.DataFrame(columns=EFF_COLS), json.dumps([]), 1, "No suggestions.")

    # Exposure weights normalized by gross (shorts allowed).
    weights = {k: v / gross_amt for k, v in amounts.items()}

    # --- risk free & moments
    rf_code = fred_series_for_horizon(horizon_years)
    rf_ann = fetch_fred_yield_annual(rf_code)
    moms = estimate_all_moments_aligned(universe, lookback_years, rf_ann)
    betas, covA, erp_ann, sigma_mkt = moms["betas"], moms["cov_ann"], moms["erp_ann"], moms["sigma_m_ann"]

    # --- portfolio stats (CAPM return + historical sigma)
    beta_p, er_capm_p, sigma_p = portfolio_stats(weights, covA, betas, rf_ann, erp_ann)

    # --- efficient alternatives on CML
    a_sigma, b_sigma, mu_eff_sigma = efficient_same_sigma(sigma_p, rf_ann, erp_ann, sigma_mkt)
    a_mu, b_mu, sigma_eff_mu = efficient_same_return(er_capm_p, rf_ann, erp_ann, sigma_mkt)

    eff_same_sigma_tbl = _table_from_weights({MARKET_TICKER: a_sigma, BILLS_TICKER: b_sigma}, gross_amt)
    eff_same_mu_tbl = _table_from_weights({MARKET_TICKER: a_mu, BILLS_TICKER: b_mu}, gross_amt)

    # --- build synthetic dataset (based ONLY on this universe)
    synth = build_synth_dataset(universe, covA, betas, rf_ann, erp_ann, n_rows=N_SYNTH, seed=777)

    # --- risk buckets by sigma (absolute percentage points around median)
    median_sigma = float(synth["sigma"].median()) if len(synth) else sigma_p
    low_max = max(float(synth["sigma"].min()), median_sigma - 0.05)  # 5% below median
    high_min = median_sigma + 0.05

    if risk_bucket == "Low":
        cand_df = synth[synth["sigma"] <= low_max].copy()
    elif risk_bucket == "High":
        cand_df = synth[synth["sigma"] >= high_min].copy()
    else:  # Medium
        cand_df = synth[(synth["sigma"] > low_max) & (synth["sigma"] < high_min)].copy()

    # Fall back to the whole dataset if the bucket is empty.
    if len(cand_df) == 0:
        cand_df = synth.copy()

    # --- embed all candidates + query, and pick 3 via MMR for diversity
    embed = get_embedder()
    cand_sentences = cand_df.apply(row_to_sentence, axis=1).tolist()

    # query sentence derived from user's portfolio + bucket
    cur_pairs = ", ".join([f"{k}:{v:+.2f}" for k, v in sorted(weights.items())])
    q_sentence = f"user portfolio ({risk_bucket} risk); capm_target {er_capm_p:.4f}; sigma_hist {sigma_p:.4f}; exposures {cur_pairs}"

    cand_embs = embed.encode(cand_sentences, convert_to_tensor=True, normalize_embeddings=True, batch_size=64, show_progress_bar=False)
    q_emb = embed.encode([q_sentence], convert_to_tensor=True, normalize_embeddings=True)[0]

    # shortlist by similarity, then MMR
    sims = st_util.cos_sim(q_emb, cand_embs)[0]
    top_idx = sims.topk(k=min(MMR_K, len(cand_df))).indices.cpu().numpy().tolist()
    shortlist_embs = cand_embs[top_idx]
    mmr_local = mmr_select(q_emb, shortlist_embs, k=3, lambda_param=MMR_LAMBDA)
    chosen = [top_idx[i] for i in mmr_local]
    recs = cand_df.iloc[chosen].reset_index(drop=True)

    # --- suggestion tables for 3 picks
    suggs = []
    for _, r in recs.iterrows():
        wmap = _weights_dict_from_row(r)
        suggs.append({
            "weights": wmap,
            "er_capm": float(r["er_capm"]),
            "sigma": float(r["sigma"]),
            "beta": float(r["beta"]),
            "table": _table_from_weights(wmap, gross_amt)
        })

    # --- plot
    img = plot_cml(
        rf_ann, erp_ann, sigma_mkt,
        sigma_p, er_capm_p,
        same_sigma_sigma=sigma_p, same_sigma_mu=mu_eff_sigma,
        same_mu_sigma=sigma_eff_mu, same_mu_mu=er_capm_p
    )

    # --- positions table (computed); the market ticker is not a position
    rows = []
    for t in universe:
        if t == MARKET_TICKER:
            continue
        rows.append({
            "ticker": t,
            "amount_usd": round(amounts.get(t, 0.0), 2),
            "weight_exposure": round(weights.get(t, 0.0), 6),
            "beta": round(betas.get(t, np.nan), 4)
        })
    pos_table = pd.DataFrame(rows, columns=POS_COLS)

    # --- info summary
    info_lines = []
    info_lines.append("### Inputs")
    info_lines.append(f"- Lookback years **{int(lookback_years)}**")
    info_lines.append(f"- Horizon years **{int(round(horizon_years))}**")
    info_lines.append(f"- Risk-free **{fmt_pct(rf_ann)}** from **{rf_code}**")
    info_lines.append(f"- Market ERP **{fmt_pct(erp_ann)}**")
    info_lines.append(f"- Market σ **{fmt_pct(sigma_mkt)}**")
    info_lines.append("")
    info_lines.append("### Your portfolio (plotted as CAPM return, historical σ)")
    info_lines.append(f"- Beta **{beta_p:.2f}**")
    info_lines.append(f"- σ (historical) **{fmt_pct(sigma_p)}**")
    info_lines.append(f"- E[return] (CAPM / SML) **{fmt_pct(er_capm_p)}**")
    info_lines.append("")
    info_lines.append("### Efficient alternatives on CML")
    info_lines.append(f"- Same σ → Market **{a_sigma:.2f}**, Bills **{b_sigma:.2f}**, Return **{fmt_pct(mu_eff_sigma)}**")
    info_lines.append(f"- Same μ → Market **{a_mu:.2f}**, Bills **{b_mu:.2f}**, σ **{fmt_pct(sigma_eff_mu)}**")
    info_lines.append("")
    info_lines.append(f"### Dataset-based suggestions (risk: **{risk_bucket}**)")
    info_lines.append("Use the selector to flip between **Pick #1 / #2 / #3**. Table shows % exposure and $ amounts.")

    # --- default suggestion shown (index 1)
    current_idx = 1
    current = suggs[current_idx - 1] if suggs else None
    current_msg = ("Pick #1 — "
                   f"E[μ] {fmt_pct(current['er_capm'])}, σ {fmt_pct(current['sigma'])}, β {current['beta']:.2f}"
                   ) if current else "No suggestion."

    return (img,
            "\n".join(info_lines),
            f"Universe set to {', '.join(universe)}",
            pos_table,
            suggs[0]["table"] if len(suggs) >= 1 else pd.DataFrame(columns=SUG_COLS),
            suggs[1]["table"] if len(suggs) >= 2 else pd.DataFrame(columns=SUG_COLS),
            suggs[2]["table"] if len(suggs) >= 3 else pd.DataFrame(columns=SUG_COLS),
            eff_same_sigma_tbl,
            eff_same_mu_tbl,
            json.dumps([{
                "er_capm": s["er_capm"], "sigma": s["sigma"], "beta": s["beta"],
            } for s in suggs]),
            current_idx,
            current_msg)
668
+
669
def on_pick_change(idx: int, meta_json: str):
    """Render the status line for the suggestion carousel.

    ``meta_json`` is the JSON list of {er_capm, sigma, beta} dicts stashed
    by ``compute``; ``idx`` is the 1-based pick number from the slider.
    """
    try:
        picks = json.loads(meta_json)
    except Exception:
        picks = []
    if not picks:
        return "No suggestion."
    # Clamp the 1-based slider value into the valid 0-based range.
    pos = min(max(int(idx) - 1, 0), len(picks) - 1)
    pick = picks[pos]
    return f"Pick #{pos+1} — E[μ] {fmt_pct(pick['er_capm'])}, σ {fmt_pct(pick['sigma'])}, β {pick['beta']:.2f}"
680
+
681
+ # =========================
682
+ # UI
683
+ # =========================
684
# Top-level Gradio UI: three tabs (Build Portfolio / Results / About) wired
# to the callbacks defined above. `demo` is launched from the __main__ guard.
with gr.Blocks(title="Efficient Portfolio Advisor", css="""
#small-note {font-size: 12px; color:#666;}
""") as demo:

    gr.Markdown("## Efficient Portfolio Advisor\n"
                "Search symbols, enter **$ amounts**, set your **horizon**. "
                "The plot shows your **CAPM expected return** vs **historical σ**, alongside the **CML**. "
                "Recommendations are generated from a **synthetic dataset (1000 portfolios)** and ranked with **local embeddings (BGE-base)** for relevance + diversity.")

    with gr.Tab("Build Portfolio"):
        with gr.Row():
            with gr.Column(scale=1):
                # Symbol search + add flow
                q = gr.Textbox(label="Search symbol")
                search_note = gr.Markdown(elem_id="small-note")
                matches = gr.Dropdown(choices=[], label="Matches", value=None)
                search_btn = gr.Button("Search")
                add_btn = gr.Button("Add selected to portfolio")

                gr.Markdown("### Positions (enter dollars; negatives allowed for shorts)")
                table = gr.Dataframe(
                    headers=["ticker", "amount_usd"],
                    datatype=["str", "number"],
                    row_count=0,
                    col_count=(2, "fixed"),
                    wrap=True
                )

            with gr.Column(scale=1):
                horizon = gr.Slider(1, 30, value=DEFAULT_LOOKBACK_YEARS, step=1, label="Investment horizon (years)")
                lookback = gr.Slider(1, 10, value=DEFAULT_LOOKBACK_YEARS, step=1, label="Lookback (years) for β and σ")
                risk_bucket = gr.Radio(["Low", "Medium", "High"], value="Medium", label="Recommendation risk level")
                run_btn = gr.Button("Compute")

        rf_msg = gr.Textbox(label="Risk-free source / status", interactive=False)
        search_btn.click(fn=do_search, inputs=q, outputs=[search_note, matches])
        add_btn.click(fn=add_symbol, inputs=[matches, table], outputs=[table, search_note])
        # Re-sanitize the table after every user edit.
        table.change(fn=lock_ticker_column, inputs=table, outputs=table)
        # NOTE(review): the inline gr.State() only discards set_horizon's second
        # return value — confirm this is supported on the targeted Gradio version.
        horizon.change(fn=set_horizon, inputs=horizon, outputs=[rf_msg, gr.State()])  # rf_msg + silent

    with gr.Tab("Results"):
        with gr.Row():
            with gr.Column(scale=1):
                plot = gr.Image(label="Capital Market Line", type="pil")
                summary = gr.Markdown(label="Summary")
                universe_msg = gr.Textbox(label="Universe status", interactive=False)

            with gr.Column(scale=1):
                positions = gr.Dataframe(
                    label="Computed positions",
                    headers=POS_COLS,
                    datatype=["str", "number", "number", "number"],
                    col_count=(len(POS_COLS), "fixed"),
                    interactive=False
                )

        gr.Markdown("### Recommendations (always from embeddings)")
        with gr.Row():
            sugg1 = gr.Dataframe(label="Pick #1", interactive=False)
            sugg2 = gr.Dataframe(label="Pick #2", interactive=False)
            sugg3 = gr.Dataframe(label="Pick #3", interactive=False)

        with gr.Row():
            pick_idx = gr.Slider(1, 3, value=1, step=1, label="Carousel: show Pick #")
            # Hidden textbox carrying the suggestion metadata JSON from compute().
            pick_meta = gr.Textbox(value="[]", visible=False)
            pick_msg = gr.Markdown("")

        gr.Markdown("### Efficient alternatives on the CML")
        eff_same_sigma_tbl = gr.Dataframe(label="Efficient: Same σ", interactive=False)
        eff_same_mu_tbl = gr.Dataframe(label="Efficient: Same μ", interactive=False)

        # Outputs must stay in sync with the 12-tuple returned by compute().
        run_btn.click(
            fn=compute,
            inputs=[lookback, table, risk_bucket, horizon],
            outputs=[
                plot, summary, universe_msg, positions,
                sugg1, sugg2, sugg3,
                eff_same_sigma_tbl, eff_same_mu_tbl,
                pick_meta, pick_idx, pick_msg
            ]
        )
        pick_idx.change(fn=on_pick_change, inputs=[pick_idx, pick_meta], outputs=pick_msg)

    with gr.Tab("About"):
        gr.Markdown(
            "### Modality & Model\n"
            "- **Modality**: Text (portfolio → text descriptions) powering **embeddings**\n"
            "- **Embedding model**: `BAAI/bge-base-en-v1.5` (local, downloaded once; no API)\n\n"
            "### Use case\n"
            "Given a portfolio, we build a synthetic dataset of 1,000 alternative mixes **using the same tickers**, "
            "compute each mix’s **CAPM return, σ, and β**, and rank candidates with embeddings to return **3 diverse, relevant suggestions** "
            "for **Low / Medium / High** risk.\n\n"
            "### Theory links\n"
            "- Portfolio expected return in the plot uses **CAPM (SML)**, while σ is historical.\n"
            "- The **CML** and the two **efficient alternatives** (same σ, same μ) use a mix of **Market (VOO)** and **Bills**."
        )
779
 
780
if __name__ == "__main__":
    # Launch the Gradio app when run as a script.
    demo.launch()