Update app.py

app.py CHANGED
@@ -1,3 +1,5 @@
+# app.py — Market Breadth & Momentum (sticky results, no nested expanders)
+
 import os
 import io
 import time
@@ -41,6 +43,7 @@ if "last_params" not in st.session_state:
 with st.sidebar:
     st.header("Parameters")
 
+    # Each expander is independent (no nesting).
     with st.expander("Data Window", expanded=False):
         default_start = datetime(2015, 1, 1).date()
         default_end = (datetime.today().date() + timedelta(days=1))
@@ -150,7 +153,6 @@ with st.sidebar:
             heat_last_days=int(heat_last_days),
             mom_look=int(mom_look),
         )
-        # mark that results should be shown (and re-shown on reruns)
         st.session_state.run_id = f"{time.time():.0f}"
 
 if clear_clicked:
@@ -353,15 +355,13 @@ idx_volume = idx_df["Volume"].reindex(clean_close.index).ffill()
 
 # ===================== SECTION 1 — Breadth Dashboard =====================
 st.header("Breadth Dashboard")
-# (… the rest of your original analysis/plotting code is unchanged …)
-# NOTE: everything below stays exactly the same as your original file.
 
-#
-# 1) Methodology expander
+# Methodology (standalone expander)
 with st.expander("Methodology", expanded=False):
     # Overview
     st.write("This panel tracks trend, participation, and momentum for a broad equity universe.")
     st.write("Use it to judge trend quality, spot divergences, and gauge risk bias.")
+
     # 1) Price trend (MAs, VWAP)
     st.write("**Price trend**")
     st.latex(r"\mathrm{SMA}_{n}(t)=\frac{1}{n}\sum_{k=0}^{n-1}P_{t-k}")
@@ -369,33 +369,39 @@ with st.expander("Methodology", expanded=False):
     st.latex(r"\mathrm{VWAP}_{200w}(t)=\frac{\sum_{k=0}^{N-1}P_{t-k}V_{t-k}}{\sum_{k=0}^{N-1}V_{t-k}},\quad N\approx200\times5")
     st.write("Price above both MAs and fast>slow = strong trend.")
     st.write("Price below both MAs and fast<slow = weak trend.")
+
     # 2) Participation breadth (% above MAs)
     st.write("**Participation breadth**")
     st.write("Share above n-day MA:")
     st.latex(r"\%\,\text{Above}_n(t)=100\cdot\frac{\#\{i:\ P_{i,t}>\mathrm{SMA}_{n,i}(t)\}}{N}")
     st.write("Zones: 0–20 weak, 20–50 neutral, 50–80 strong.")
     st.write("Higher shares mean broad support for the trend.")
-
+
+    # 3) Advance–Decline line
     st.write("**Advance–Decline (A/D) line**")
     st.latex(r"A_t=\#\{i:\ P_{i,t}>P_{i,t-1}\},\quad D_t=\#\{i:\ P_{i,t}<P_{i,t-1}\}")
     st.latex(r"\mathrm{ADLine}_t=\sum_{u\le t}(A_u-D_u)")
     st.write("Rising A/D confirms uptrends. Falling A/D warns of narrow leadership.")
+
     # 4) Net new 52-week highs
     st.write("**Net new 52-week highs**")
     st.latex(r"H_{i,t}^{52}=\max_{u\in[t-251,t]}P_{i,u},\quad L_{i,t}^{52}=\min_{u\in[t-251,t]}P_{i,u}")
     st.latex(r"\text{NewHighs}_t=\sum_i \mathbf{1}\{P_{i,t}=H_{i,t}^{52}\},\quad \text{NewLows}_t=\sum_i \mathbf{1}\{P_{i,t}=L_{i,t}^{52}\}")
     st.latex(r"\text{NetHighs}_t=\text{NewHighs}_t-\text{NewLows}_t")
     st.write("Positive and persistent net highs support trend durability.")
+
     # 5) Smoothed advancing vs declining counts
     st.write("**Advancing vs declining (smoothed)**")
     st.latex(r"\overline{A}_t=\frac{1}{w}\sum_{k=0}^{w-1}A_{t-k},\quad \overline{D}_t=\frac{1}{w}\sum_{k=0}^{w-1}D_{t-k}")
     st.write("Advancers > decliners over the window = constructive breadth.")
+
     # 6) McClellan Oscillator
     st.write("**McClellan Oscillator (MO)**")
     st.latex(r"E^{(n)}_t=\text{EMA}_n(A_t-D_t)")
     st.latex(r"\mathrm{MO}_t=E^{(19)}_t-E^{(39)}_t")
     st.write("Zero-line up-cross = improving momentum. Down-cross = fading momentum.")
     st.write("A 9-day EMA of MO can act as a signal line.")
+
     # Practical reads
     st.write("**Practical use**")
     st.write("- Broad strength: % above 200-day ≥ 50% supports trends.")
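The formulas in this expander map one-to-one onto vectorized pandas operations. A minimal sketch, assuming a wide DataFrame `close` of daily closes (one column per ticker); the names here are illustrative, not the app's actual variables:

```python
import pandas as pd

def breadth_metrics(close: pd.DataFrame, n_ma: int = 200,
                    fast: int = 19, slow: int = 39) -> pd.DataFrame:
    """Percent above the n-day SMA, A/D line, and McClellan Oscillator."""
    sma = close.rolling(n_ma).mean()
    pct_above = 100.0 * (close > sma).sum(axis=1) / close.shape[1]

    chg = close.diff()
    adv = (chg > 0).sum(axis=1)        # A_t: advancing names
    dec = (chg < 0).sum(axis=1)        # D_t: declining names
    ad_line = (adv - dec).cumsum()     # ADLine_t = cumulative net advances

    net = (adv - dec).astype(float)
    mo = (net.ewm(span=fast, adjust=False).mean()
          - net.ewm(span=slow, adjust=False).mean())  # MO_t = E19 - E39

    return pd.DataFrame({"pct_above": pct_above, "ad_line": ad_line, "mo": mo})
```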
@@ -546,75 +552,94 @@ fig.update_layout(
 )
 st.plotly_chart(fig, use_container_width=True)
 
-#
+# --- Dynamic interpretation (standalone expander) ---
 with st.expander("Dynamic Interpretation", expanded=False):
     buf = io.StringIO()
+
     def _last_val(s):
         s = s.dropna()
         return s.iloc[-1] if len(s) else np.nan
+
     def _last_date(s):
         s = s.dropna()
        return s.index[-1] if len(s) else None
+
     def _pct(a, b):
         if not np.isfinite(a) or not np.isfinite(b) or b == 0:
             return np.nan
         return (a - b) / b * 100.0
+
     def _fmt_pct(x):
         return "n/a" if not np.isfinite(x) else f"{x:.1f}%"
+
     def _fmt_num(x):
         return "n/a" if not np.isfinite(x) else f"{x:,.2f}"
 
     as_of = _last_date(idx)
+
     px = _last_val(idx)
     ma50 = _last_val(sma_fast_idx)
     ma200 = _last_val(sma_slow_idx)
     vwap200 = _last_val(vwap_idx)
+
     p50 = float(_last_val(pct_above_fast))
     p200 = float(_last_val(pct_above_slow))
+
     ad_now = _last_val(ad_line)
     nh_now = int(_last_val(new_highs)) if np.isfinite(_last_val(new_highs)) else 0
     nh_sma = float(_last_val(sma10_net_hi))
+
     avg_adv_last = float(_last_val(avg_adv))
     avg_decl_last = float(_last_val(avg_decl))
+
     _ema19 = net_adv.ewm(span=int(mo_span_fast), adjust=False).mean()
     _ema39 = net_adv.ewm(span=int(mo_span_slow), adjust=False).mean()
     mc_osc2 = (_ema19 - _ema39).rename("MO")
     mc_signal = mc_osc2.ewm(span=int(mo_signal_span), adjust=False).mean().rename("Signal")
+
     mo_last = float(_last_val(mc_osc2))
     mo_prev = float(_last_val(mc_osc2.shift(1)))
     mo_5ago = float(_last_val(mc_osc2.shift(5)))
     mo_slope5 = mo_last - mo_5ago
     mo_sig_last = float(_last_val(mc_signal))
     mo_sig_prev = float(_last_val(mc_signal.shift(1)))
+
     mo_roll = mc_osc2.rolling(252, min_periods=126)
     mo_mean = mo_roll.mean()
     mo_std = mo_roll.std()
     mo_z = (mc_osc2 - mo_mean) / mo_std
     mo_z_last = float(_last_val(mo_z))
+
     mo_abs = np.abs(mc_osc2.dropna())
     if len(mo_abs) >= 20:
         mo_ext = float(np.nanpercentile(mo_abs.tail(252), 90))
     else:
         mo_ext = np.nan
+
     look_fast = 10
     look_mid = 20
     look_div = 63
+
     ma50_slope = _last_val(sma_fast_idx.diff(look_fast))
     ma200_slope = _last_val(sma_slow_idx.diff(look_mid))
     p50_chg = p50 - float(_last_val(pct_above_fast.shift(look_fast)))
     p200_chg = p200 - float(_last_val(pct_above_slow.shift(look_fast)))
     ad_mom = ad_now - float(_last_val(ad_line.shift(look_mid)))
+
     d50 = _pct(px, ma50)
     d200 = _pct(px, ma200)
     dvw = _pct(px, vwap200)
     h63 = float(_last_val(idx.rolling(look_div).max()))
     dd63 = _pct(px, h63) if np.isfinite(h63) else np.nan
+
     ad_63h = float(_last_val(ad_line.rolling(look_div).max()))
     mo_63h = float(_last_val(mc_osc2.rolling(look_div).max()))
     near_high_px = np.isfinite(h63) and np.isfinite(px) and px >= 0.995 * h63
     near_high_ad = np.isfinite(ad_63h) and np.isfinite(ad_now) and ad_now >= 0.995 * ad_63h
     near_high_mo = np.isfinite(mo_63h) and np.isfinite(mo_last) and mo_last >= 0.95 * mo_63h
+
     breadth_thrust = (p50 >= 55) and (p50_chg >= 20)
+
     score = 0
     score += 1 if px > ma50 else 0
     score += 1 if px > ma200 else 0
@@ -626,6 +651,7 @@ with st.expander("Dynamic Interpretation", expanded=False):
     score += 1 if nh_now > 0 and nh_sma >= 0 else 0
     score += 1 if avg_adv_last > avg_decl_last else 0
     score += 1 if (mo_last > 0 and mo_slope5 > 0) else 0
+
     if score >= 8:
         regime = "Risk-on bias"
     elif score >= 5:
@@ -634,6 +660,7 @@ with st.expander("Dynamic Interpretation", expanded=False):
         regime = "Risk-off bias"
 
     print(f"=== Market breadth narrative — {as_of.date() if as_of is not None else 'N/A'} ===", file=buf)
+
     # [Trend]
     print("\n[Trend]", file=buf)
     if np.isfinite(px) and np.isfinite(ma50) and np.isfinite(ma200):
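Read across these two hunks, the narrative builds an additive score of binary breadth checks and maps it to a regime with the 8/5 thresholds shown. A compact sketch of that mapping; note the middle regime's label is elided by this diff view, so the one below is hypothetical:

```python
def regime_label(score: int) -> str:
    # Thresholds from the app: >= 8 risk-on, >= 5 middle band, else risk-off.
    if score >= 8:
        return "Risk-on bias"
    if score >= 5:
        return "Mixed / neutral"  # placeholder label; not visible in this diff
    return "Risk-off bias"
```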
@@ -802,557 +829,560 @@ with st.expander("Dynamic Interpretation", expanded=False):
 
     st.text(buf.getvalue())
 
-…
-            x=rebased.index, y=rebased[t], name=t, mode="lines",
-            line=dict(width=1, color="rgba(160,160,160,0.4)"),
-            hovertemplate=hover_tmpl(t), showlegend=False
-        ))
-    for t in mag7:
-        fig2.add_trace(go.Scatter(
-            x=rebased.index, y=rebased[t], name=t, mode="lines",
-            line=dict(width=2, color=mag_colors.get(t, "#ffffff")),
-            hovertemplate=hover_tmpl(t)
-        ))
-    for t in top5:
-        fig2.add_trace(go.Scatter(
-            x=rebased.index, y=rebased[t], name=f"Top {t}", mode="lines",
-            line=dict(width=2, color="lime"),
-            hovertemplate=hover_tmpl(t), showlegend=False
-        ))
-    for t in worst5:
-        fig2.add_trace(go.Scatter(
-            x=rebased.index, y=rebased[t], name=f"Worst {t}", mode="lines",
-            line=dict(width=2, color="red", dash="dash"),
-            hovertemplate=hover_tmpl(t), showlegend=False
-        ))
     fig2.add_trace(go.Scatter(
-        x=
-        line=dict(width=
     ))
 
-    y_range = None
-    if len(vals) > 10:
-        qlo, qhi = vals.quantile([0.01, 0.99])
-        y_min = max(1e-2, qlo / y_pad)
-        y_max = max(y_min * 1.1, qhi * y_pad)
-        y_range = [np.log10(y_min), np.log10(y_max)]
-
-    fig2.update_yaxes(type="log", range=y_range, title=f"Rebased Price (start = {int(base)})")
-    fig2.update_xaxes(title="Date")
-    fig2.update_layout(
-        template="plotly_dark",
-        height=700,
-        margin=dict(l=60, r=30, t=70, b=90),
-        title=f"Price Level Comparison (Rebased, Log Scale) — Last {n_days} Sessions",
-        legend=dict(orientation="h", y=-0.18, yanchor="top", x=0, xanchor="left"),
-        hovermode="closest",
-        font=dict(color="white")
-    )
-    st.plotly_chart(fig2, use_container_width=True)
-
-    with st.expander("Dynamic Interpretation", expanded=False):
-        buf2 = io.StringIO()
 
         else:
-…
-            top_contrib = float((perf_last.sort_values(ascending=False).head(topN) - base).sum() / gains_all * 100)
-
-            rets = rebased.pct_change().replace([np.inf, -np.inf], np.nan).dropna(how="all")
-            spx_r = pd.Series(spx_rebased, index=spx_rebased.index).pct_change()
-            corr_to_spx = rets.corrwith(spx_r, axis=0).dropna()
-            corr_med = float(corr_to_spx.median()) if len(corr_to_spx) else np.nan
-            low_corr_share = float((corr_to_spx < 0.3).mean() * 100) if len(corr_to_spx) else np.nan
-
-            spx_chg = spx_last - base
-            k = min(5, n_names)
-            leaders = perf_last.sort_values(ascending=False).head(k)
-            laggards = perf_last.sort_values(ascending=True).head(k)
-
-            print(f"=== Rebased performance read — {as_of} (window: {n_days} sessions) ===\n", file=buf2)
-            print("[Market]", file=buf2)
-            print(f"S&P 500 is {_fmt_pct(spx_chg)} over the window.", file=buf2)
-            print(f"Equal-weight average is {_fmt_pct(eq_avg - base)}, median is {_fmt_pct(eq_med - base)}.", file=buf2)
-            if np.isfinite(eq_avg) and np.isfinite(spx_last):
-                gap = (eq_avg - spx_last)
-                side = "above" if gap >= 0 else "below"
-                print(f"Equal-weight sits {_fmt_pct(abs(gap))} {side} the index.", file=buf2)
-            print("", file=buf2)
-
-            print("[Breadth]", file=buf2)
-            print(f"{_fmt_pct(pct_pos)} of names are up. {_fmt_pct(pct_beat)} beat the index.", file=buf2)
-            print(f"Dispersion std is {_fmt_num(disp_std)} points on the rebased scale.", file=buf2)
-            print(f"IQR width is {_fmt_num(iqr_w)} points ({_fmt_num(iqr_lo)} to {_fmt_num(iqr_hi)}).", file=buf2)
-            if pct_pos >= 70 and pct_beat >= 55:
-                print("Rally is broad. Leadership is shared across many names.", file=buf2)
-            elif pct_pos <= 35 and pct_beat <= 45:
-                print("Rally is narrow or absent. Leadership is concentrated.", file=buf2)
-            else:
-                print("Breadth is mixed. The tape can rotate quickly.", file=buf2)
-            print("", file=buf2)
-
-            print("[Concentration]", file=buf2)
-            if np.isfinite(top_contrib):
-                print(f"Top {topN} names explain {_fmt_pct(top_contrib)} of equal-weight gains.", file=buf2)
-            if len(mag7_in):
-                print(f"MAG7 equal-weight is {_fmt_pct(mag7_mean - base)}. Rest is {_fmt_pct(rest_mean - base)}.", file=buf2)
-                if np.isfinite(mag7_beat):
-                    print(f"{_fmt_pct(mag7_beat)} of MAG7 beat the index.", file=buf2)
-            else:
-                print("MAG7 tickers are not all present in this window.", file=buf2)
-            print("", file=buf2)
-
-            print("[Correlation]", file=buf2)
-            if len(corr_to_spx):
-                print(f"Median correlation to the index is {_fmt_num(corr_med)}.", file=buf2)
-                print(f"{_fmt_pct(low_corr_share)} of names show low correlation (<0.30).", file=buf2)
-                if np.isfinite(corr_med) and corr_med < 0.5:
-                    print("Factor dispersion is high. Stock picking matters more.", file=buf2)
-                elif np.isfinite(corr_med) and corr_med > 0.8:
-                    print("Common beta dominates. Moves are index-driven.", file=buf2)
-                else:
-                    print("Correlation sits in a middle zone. Rotation can continue.", file=buf2)
             else:
-                print("
-
-            for t, v in leaders.items():
-                print(f" {t}: {_fmt_pct(v - base)}", file=buf2)
-
-            print("\n[Laggards]", file=buf2)
-            for t, v in laggards.items():
-                print(f" {t}: {_fmt_pct(v - base)}", file=buf2)
-
-            print("\n[What to monitor]", file=buf2)
-            print("Watch the gap between equal-weight and index. A widening gap signals concentration risk.", file=buf2)
-            print("Track the share beating the index. Sustained readings above 55% support trend durability.", file=buf2)
-            print("Watch median correlation. Falling correlation favors dispersion and relative value setups.", file=buf2)
-
-        st.text(buf2.getvalue())
-
-# ===================== SECTION 3 — Daily Return Heatmap =====================
-st.header("Daily Return Heatmap")
-
-with st.expander("Methodology", expanded=False):
-    st.write("Shows daily % returns for all names over the selected window. Highlights broad up/down days, dispersion, and leadership.")
-    st.write("Use it to spot synchronized moves, stress days, and rotation across the universe.")
-
-    st.write("**Daily return (per name)**")
-    st.latex(r"r_{i,t}=\frac{P_{i,t}}{P_{i,t-1}}-1")
-
-    st.write("**Heatmap values**")
-    st.write("Cells display r_{i,t}. Tickers are sorted by the most recent day’s return so leaders/laggards are obvious.")
-
-    st.write("**Robust color scale (cap extremes)**")
-    st.latex(r"c=\operatorname{P95}\left(\left|r_{i,t}\right|\right)\ \text{over the window}")
-    st.latex(r"\text{color range}=[-c,\,+c],\quad \text{midpoint}=0")
-    st.write("Capping avoids a few outliers overpowering the color scale.")
-
-    st.write("**Breadth and dispersion (how to read)**")
-    st.latex(r"\text{Up share}_t=100\cdot \frac{1}{N}\sum_{i=1}^{N}\mathbf{1}[r_{i,t}>0]")
-    st.latex(r"\sigma_{\text{cs},t}=\operatorname{stdev}\{r_{i,t}\}_{i=1}^{N}")
-    st.write("- High up share with low dispersion = uniform risk-on.")
-    st.write("- Mixed colors with high dispersion = rotation and factor spread.")
-    st.write("- Clusters of red/green by industry often flag sector moves.")
-
-    st.write("**Large-move counts (quick context)**")
-    st.latex(r"\text{BigUp}_t=\sum_{i}\mathbf{1}[r_{i,t}\ge \tau],\quad \text{BigDn}_t=\sum_{i}\mathbf{1}[r_{i,t}\le -\tau]")
-    st.latex(r"\tau=2\% \ \text{(default)}")
-    st.write("A jump in BigUp/BigDn signals a thrust or a shock day.")
-
-    st.write("**Short-horizon follow-through**")
-    st.latex(r"\bar{r}_{i,t}^{(w)}=\frac{1}{w}\sum_{k=0}^{w-1} r_{i,t-k},\quad w=5")
-    st.write("A broad rise in 5-day averages supports continuation; a fade warns of stall.")
-
-    st.write("**Practical reads**")
-    st.write("- Many greens, low dispersion: beta tailwind; index setups work.")
-    st.write("- Greens + high dispersion: stock picking/sector tilts matter.")
-    st.write("- Reds concentrated in a few groups: rotate risk, not necessarily de-risk.")
-    st.write("- Extreme red breadth with spikes in dispersion: watch liquidity and reduce gross.")
-
-# Daily returns last N days
-ret_daily = clean_close.pct_change().iloc[1:]
-ret_window = int(heat_last_days)
-ret_last = ret_daily.iloc[-ret_window:]
-if ret_last.empty:
-    st.warning("Not enough data for the daily return heatmap.")
-else:
-    order = ret_last.iloc[-1].sort_values(ascending=True).index
-    ret_last = ret_last[order]
-
-    abs_max = np.nanpercentile(np.abs(ret_last.values), 95)
-    z = ret_last.T.values
-    x = ret_last.index
-    y = list(order)
-
-    n_dates = len(x)
-    step = max(1, n_dates // 10)
-    xtick_vals = x[::step]
-    xtick_texts = [ts.strftime("%Y-%m-%d") for ts in xtick_vals]
-
-    fig_hm = go.Figure(go.Heatmap(
-        z=z, x=x, y=y,
-        colorscale="RdYlGn",
-        zmin=-abs_max, zmax=abs_max, zmid=0,
-        colorbar=dict(title="Daily Return", tickformat=".0%"),
-        hovertemplate="%{y}<br>%{x|%Y-%m-%d}<br>%{z:.2%}<extra></extra>"
-    ))
 
-…
-        title=f"Last {ret_window}-Day Daily Return Heatmap",
-        height=height,
-        margin=dict(l=100, r=40, t=60, b=60),
-        font=dict(color="white")
-    )
-    fig_hm.update_yaxes(title="Tickers (sorted by latest daily return)", tickfont=dict(size=8))
-    fig_hm.update_xaxes(title="Date", tickmode="array", tickvals=xtick_vals, ticktext=xtick_texts, tickangle=45)
-    st.plotly_chart(fig_hm, use_container_width=True)
 
-…
-        return "n/a" if pd.isna(x) else f"{x*100:.2f}%"
 
-    else:
-        as_of = ret_last.index[-1].date()
-        last = ret_last.iloc[-1]
-        N = last.shape[0]
-        up = int((last > 0).sum())
-        dn = int((last < 0).sum())
-        flat = int(N - up - dn)
-        mean = float(last.mean()); med = float(last.median())
-        std = float(last.std(ddof=0))
-        q25 = float(last.quantile(0.25)); q75 = float(last.quantile(0.75))
-        iqr = q75 - q25
-        thr = 0.02
-        big_up = int((last >= thr).sum())
-        big_dn = int((last <= -thr).sum())
-        w = min(5, len(ret_last))
-        avg_w = ret_last.tail(w).mean()
-        pct_pos_w = float((avg_w > 0).mean())
-        cs_std = ret_last.std(axis=1, ddof=0)
-        today_std = float(cs_std.iloc[-1])
-        disp_pct = float((cs_std <= today_std).mean())
-        k = min(10, N)
-        leaders = last.sort_values(ascending=False).head(k)
-        laggards = last.sort_values(ascending=True).head(k)
-
-        def _streak(s, max_look=20):
-            v = s.tail(max_look).to_numpy(dtype=float)
-            sign = np.sign(v); sign[np.isnan(sign)] = 0
-            if len(sign) == 0 or sign[-1] == 0:
-                return 0
-            tgt = sign[-1]; cnt = 0
-            for x in sign[::-1]:
-                if x == tgt: cnt += 1
-                else: break
-            return int(cnt if tgt > 0 else -cnt)
-
-        streaks = {t: _streak(ret_last[t]) for t in set(leaders.index).union(laggards.index)}
-
-        print(f"=== Daily return heatmap read — {as_of} (last {len(ret_last)} sessions) ===", file=buf3)
-        print("\n[Today]", file=buf3)
-        print(f"Up: {up}/{N} ({_pct(up/N)}). Down: {dn}/{N} ({_pct(dn/N)}). Flat: {flat}.", file=buf3)
-        print(f"Mean: {_pp(mean)}. Median: {_pp(med)}. Std: {_pp(std)}. IQR: {_pp(iqr)}.", file=buf3)
-        print(f"Moves ≥ {int(thr*100)}%: +{big_up}. Moves ≤ -{int(thr*100)}%: {big_dn}.", file=buf3)
-
-        print("\n[Recent breadth]", file=buf3)
-        print(f"{_pct(pct_pos_w)} of names have a positive average over the last {w} sessions.", file=buf3)
-
-        print("\n[Dispersion]", file=buf3)
-        print(f"Cross-section std today: {_pp(today_std)} (window percentile ~{disp_pct*100:.0f}th).", file=buf3)
-
-        print("\n[Leaders today]", file=buf3)
-        for t, v in leaders.items():
-            stv = streaks.get(t, 0)
-            lab = ("flat" if stv == 0 else (f"{stv}d up" if stv > 0 else f"{-stv}d down"))
-            print(f" {t}: {_pp(v)} ({lab})", file=buf3)
-
-        print("\n[Laggards today]", file=buf3)
-        for t, v in laggards.items():
-            stv = streaks.get(t, 0)
-            lab = ("flat" if stv == 0 else (f"{stv}d up" if stv > 0 else f"{-stv}d down"))
-            print(f" {t}: {_pp(v)} ({lab})", file=buf3)
-
-        print("\n[What to monitor]", file=buf3)
-        print("Watch big-move counts and the 5-day positive share for follow-through.", file=buf3)
-        print("Track dispersion; elevated dispersion favors relative moves over index moves.", file=buf3)
-
-        st.text(buf3.getvalue())
-
-# ===================== SECTION 4 — Percentile Momentum Heatmap =====================
-st.header("Percentile Momentum Heatmap")
-
-with st.expander("Methodology", expanded=False):
-    st.write("Ranks each stock’s medium-horizon return against the cross-section each day.")
-    st.write("Use it to spot broad momentum, rotation, and persistence.")
-
-    st.write("**n-day return (per name)**")
-    st.latex(r"r^{(n)}_{i,t}=\frac{P_{i,t}}{P_{i,t-n}}-1")
-
-    st.write("**Cross-sectional percentile (per day)**")
-    st.latex(r"p_{i,t}=\frac{\operatorname{rank}\!\left(r^{(n)}_{i,t}\right)}{N}")
-    st.write("0 means worst in the universe that day. 1 means best.")
-    st.write("The heatmap shows p_{i,t}. Rows are sorted by the latest percentile.")
-
-    st.write("**Breadth buckets (how to read)**")
-    st.latex(r"\text{Top\,20\%}_t=\frac{1}{N}\sum_{i}\mathbf{1}[p_{i,t}\ge 0.80]")
-    st.latex(r"\text{Bottom\,20\%}_t=\frac{1}{N}\sum_{i}\mathbf{1}[p_{i,t}\le 0.20]")
-    st.write("High Top-20% share signals broad upside momentum. High Bottom-20% share signals broad weakness.")
-
-    st.write("**Momentum shift vs a short lookback**")
-    st.latex(r"\Delta p_i=p_{i,T}-p_{i,T-w}")
-    st.write("Improving names: Δp_i > 0. Weakening names: Δp_i < 0.")
-
-    st.write("**Persistence (top/bottom quintile)**")
-    st.latex(r"\text{TopQ}_{i}=\sum_{k=0}^{w-1}\mathbf{1}[p_{i,T-k}\ge 0.80]")
-    st.latex(r"\text{BotQ}_{i}=\sum_{k=0}^{w-1}\mathbf{1}[p_{i,T-k}\le 0.20]")
-    st.write("Names with TopQ = w held leadership. BotQ = w stayed weak.")
-
-    st.write("**Practical reads**")
-    st.write("- Rising median percentile and high Top-20% share: trend has breadth.")
-    st.write("- Mixed median with both tails active: rotation/dispersion regime.")
-    st.write("- Persistent top-quintile list: candidates for follow-through.")
-    st.write("- Persistent bottom-quintile list: candidates for mean-reversion checks.")
-
-look_days = int(mom_look)
-ret_n = clean_close.pct_change(look_days)
-ret_n = ret_n.iloc[look_days:]
-if ret_n.empty:
-    st.warning("Not enough data for the momentum heatmap.")
-else:
-    perc = ret_n.rank(axis=1, pct=True)
-    order2 = perc.iloc[-1].sort_values(ascending=True).index
-    perc = perc[order2]
-
-    z = perc.T.values
-    x = perc.index
-    y = list(order2)
-
-    n_dates = len(x)
-    step = max(1, n_dates // 10)
-    xtick_vals = x[::step]
-    xtick_texts = [ts.strftime("%Y-%m-%d") for ts in xtick_vals]
-
-    fig_pm = go.Figure(go.Heatmap(
-        z=z, x=x, y=y,
-        colorscale="Viridis",
-        zmin=0, zmax=1,
-        colorbar=dict(title="Return Percentile"),
-        hovertemplate="%{y}<br>%{x|%Y-%m-%d}<br>%{z:.0%}<extra></extra>"
-    ))
 
-…
 
 # Hide default Streamlit style
 st.markdown(
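Section 4's percentile ranking (removed above and re-added by this commit) reduces to one pandas call per day. A minimal sketch under the same assumptions as before (a wide DataFrame `close` of daily closes; helper names are illustrative):

```python
import pandas as pd

def momentum_percentiles(close: pd.DataFrame, n: int = 63) -> pd.DataFrame:
    """p_{i,t}: cross-sectional percentile of each name's n-day return."""
    ret_n = close.pct_change(n).iloc[n:]   # r^(n)_{i,t}
    return ret_n.rank(axis=1, pct=True)    # 0 = worst that day, 1 = best

def quintile_persistence(perc: pd.DataFrame, w: int = 10) -> pd.DataFrame:
    """TopQ/BotQ: days spent in the top/bottom quintile over the last w days."""
    tail = perc.tail(w)
    return pd.DataFrame({
        "top_q": (tail >= 0.80).sum(),
        "bot_q": (tail <= 0.20).sum(),
    })
```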
+# ===================== SECTION 2 — Rebased Comparison =====================
+st.header("Rebased Comparison (Last N sessions)")
+
+# Methodology (standalone expander)
+with st.expander("Methodology", expanded=False):
+    st.write("Compares stock paths on a common scale and highlights leadership vs laggards.")
+    st.write("Use it to judge breadth, concentration, and dispersion over the selected window.")
+
+    st.write("**Rebasing (start = B)**")
+    st.latex(r"R_{i,t}= \frac{P_{i,t}}{P_{i,t_0}}\times B")
+    st.write("Each line shows cumulative performance since the window start.")
+    st.write("The index is rebased the same way for reference.")
+
+    st.write("**Log scale**")
+    st.write("We plot the y-axis in log scale so equal percent moves look equal.")
+    st.write("Y-range uses robust bounds (1st–99th percentiles) with padding.")
+
+    st.write("**Leaders and laggards**")
+    st.latex(r"\text{Perf}_{i}=R_{i,T}")
+    st.write("Leaders are highest Perf at T. Laggards are lowest.")
+    st.write("MAG7 are highlighted if present.")
+
+    st.write("**Equal-weight summaries**")
+    st.latex(r"\text{EWAvg}_T=\frac{1}{M}\sum_{i=1}^{M}R_{i,T}")
+    st.latex(r"\text{Median}_T=\operatorname{median}\{R_{i,T}\}")
+    st.latex(r"\%\text{Up}_T=100\cdot \frac{1}{M}\sum_{i=1}^{M}\mathbf{1}[R_{i,T}>B]")
+    st.latex(r"\%\text{BeatIdx}_T=100\cdot \frac{1}{M}\sum_{i=1}^{M}\mathbf{1}[R_{i,T}>R_{\text{idx},T}]")
+    st.write("These give a breadth read relative to the index and to flat (B).")
+
+    st.write("**Dispersion (cross-section)**")
+    st.latex(r"\sigma_T=\operatorname{stdev}\{R_{i,T}\},\quad \text{IQR}_T=Q_{0.75}-Q_{0.25}")
+    st.write("High dispersion means large performance spread across names.")
+
+    st.write("**Concentration (top N share of gains)**")
+    st.latex(r"\text{TopNShare}_T=\frac{\sum_{i\in \text{Top}N}(R_{i,T}-B)}{\sum_{j=1}^{M}(R_{j,T}-B)}\times 100")
+    st.write("Large TopNShare implies leadership is concentrated.")
+
+    st.write("**Correlation to index (optional diagnostic)**")
+    st.latex(r"\rho_i=\operatorname{corr}\big(\Delta \ln P_{i,t},\, \Delta \ln P_{\text{idx},t}\big)")
+    st.write("Lower median correlation favors stock picking. High correlation means beta drives moves.")
+
+    st.write("**Practical reads**")
+    st.write("- Broad advance: many lines above the index and %BeatIdx high.")
+    st.write("- Concentration risk: TopNShare large while most lines trail the index.")
+    st.write("- Rotation/dispersion: high cross-section std and lower median correlation.")
+    st.write("- Leadership quality: leaders holding gains on a log scale with limited drawdowns.")
+
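The rebasing and TopNShare definitions above reduce to a few pandas lines. A minimal sketch under stated assumptions (a wide DataFrame `close` over the window; function names are illustrative, not part of the app):

```python
import numpy as np
import pandas as pd

def rebase(close: pd.DataFrame, base: float = 100.0) -> pd.DataFrame:
    """R_{i,t} = P_{i,t} / P_{i,t0} * B, dropping names without a valid start price."""
    first = close.iloc[0]
    ok = np.isfinite(first) & (first > 0)
    return close.loc[:, ok] / first[ok] * base

def top_n_share(rebased: pd.DataFrame, n: int = 10, base: float = 100.0) -> float:
    """Share of total equal-weight gains explained by the top-n names."""
    perf = rebased.iloc[-1].dropna() - base   # gains over the window
    total = perf.sum()
    if abs(total) < 1e-9:                     # guard against a ~flat tape
        return float("nan")
    return float(perf.nlargest(n).sum() / total * 100.0)
```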
+
n_days = int(rebase_days)
|
| 880 |
+
base = float(rebase_base)
|
| 881 |
+
|
| 882 |
+
recent = clean_close.iloc[-n_days:].dropna(axis=1, how="any")
|
| 883 |
+
if recent.empty:
|
| 884 |
+
st.warning("Not enough overlapping history for the rebased comparison window.")
|
| 885 |
+
else:
|
| 886 |
+
first = recent.iloc[0]
|
| 887 |
+
mask = (first > 0) & np.isfinite(first)
|
| 888 |
+
rebased = (recent.loc[:, mask] / first[mask]) * base
|
| 889 |
+
|
| 890 |
+
perf = rebased.iloc[-1].dropna()
|
| 891 |
+
mag7_all = ["AAPL","MSFT","AMZN","META","GOOGL","NVDA","TSLA"]
|
| 892 |
+
mag7 = [t for t in mag7_all if t in rebased.columns]
|
| 893 |
+
non_mag = perf.drop(index=mag7, errors="ignore")
|
| 894 |
+
top5 = non_mag.nlargest(min(5, len(non_mag))).index.tolist()
|
| 895 |
+
worst5 = non_mag.nsmallest(min(5, len(non_mag))).index.tolist()
|
| 896 |
+
|
| 897 |
+
mag_colors = {
|
| 898 |
+
"AAPL":"#00bfff","MSFT":"#3cb44b","AMZN":"#ffe119",
|
| 899 |
+
"META":"#4363d8","GOOGL":"#f58231","NVDA":"#911eb4","TSLA":"#46f0f0"
|
| 900 |
+
}
|
| 901 |
+
|
| 902 |
+
spx = idx.reindex(rebased.index).dropna()
|
| 903 |
+
spx_rebased = spx / spx.iloc[0] * base
|
| 904 |
+
|
| 905 |
+
def hover_tmpl(name: str) -> str:
|
| 906 |
+
return "%{y:.2f}<br>%{x|%Y-%m-%d}<extra>" + name + "</extra>"
|
| 907 |
+
|
| 908 |
+
fig2 = go.Figure()
|
| 909 |
+
for t in rebased.columns:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 910 |
fig2.add_trace(go.Scatter(
|
| 911 |
+
x=rebased.index, y=rebased[t], name=t, mode="lines",
|
| 912 |
+
line=dict(width=1, color="rgba(160,160,160,0.4)"),
|
| 913 |
+
hovertemplate=hover_tmpl(t), showlegend=False
|
| 914 |
))
|
| 915 |
+
for t in mag7:
|
| 916 |
+
fig2.add_trace(go.Scatter(
|
| 917 |
+
x=rebased.index, y=rebased[t], name=t, mode="lines",
|
| 918 |
+
line=dict(width=2, color=mag_colors.get(t, "#ffffff")),
|
| 919 |
+
hovertemplate=hover_tmpl(t)
|
| 920 |
+
))
|
| 921 |
+
for t in top5:
|
| 922 |
+
fig2.add_trace(go.Scatter(
|
| 923 |
+
x=rebased.index, y=rebased[t], name=f"Top {t}", mode="lines",
|
| 924 |
+
line=dict(width=2, color="lime"),
|
| 925 |
+
hovertemplate=hover_tmpl(t), showlegend=False
|
| 926 |
+
))
|
| 927 |
+
for t in worst5:
|
| 928 |
+
fig2.add_trace(go.Scatter(
|
| 929 |
+
x=rebased.index, y=rebased[t], name=f"Worst {t}", mode="lines",
|
| 930 |
+
line=dict(width=2, color="red", dash="dash"),
|
| 931 |
+
hovertemplate=hover_tmpl(t), showlegend=False
|
| 932 |
+
))
|
| 933 |
+
fig2.add_trace(go.Scatter(
|
| 934 |
+
x=spx_rebased.index, y=spx_rebased.values, name="S&P 500 (rebased)", mode="lines",
|
| 935 |
+
line=dict(width=3, color="white"), hovertemplate=hover_tmpl("S&P 500")
|
| 936 |
+
))
|
| 937 |
+
|
| 938 |
+
vals = pd.concat([rebased.stack(), pd.Series(spx_rebased.values, index=spx_rebased.index)])
|
| 939 |
+
vals = vals.replace([np.inf, -np.inf], np.nan).dropna()
|
| 940 |
+
vals = vals[vals > 0]
|
| 941 |
+
y_range = None
|
| 942 |
+
if len(vals) > 10:
|
| 943 |
+
qlo, qhi = vals.quantile([0.01, 0.99])
|
| 944 |
+
y_min = max(1e-2, qlo / y_pad)
|
| 945 |
+
y_max = max(y_min * 1.1, qhi * y_pad)
|
| 946 |
+
y_range = [np.log10(y_min), np.log10(y_max)]
|
| 947 |
+
|
| 948 |
+
fig2.update_yaxes(type="log", range=y_range, title=f"Rebased Price (start = {int(base)})")
|
| 949 |
+
fig2.update_xaxes(title="Date")
|
| 950 |
+
fig2.update_layout(
|
| 951 |
+
template="plotly_dark",
|
| 952 |
+
height=700,
|
| 953 |
+
margin=dict(l=60, r=30, t=70, b=90),
|
| 954 |
+
title=f"Price Level Comparison (Rebased, Log Scale) — Last {n_days} Sessions",
|
| 955 |
+
legend=dict(orientation="h", y=-0.18, yanchor="top", x=0, xanchor="left"),
|
| 956 |
+
hovermode="closest",
|
| 957 |
+
font=dict(color="white")
|
| 958 |
+
)
|
| 959 |
+
st.plotly_chart(fig2, use_container_width=True)
|
| 960 |
|
| 961 |
+
# Dynamic Interpretation (standalone expander)
|
| 962 |
+
with st.expander("Dynamic Interpretation", expanded=False):
|
| 963 |
+
buf2 = io.StringIO()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 964 |
|
| 965 |
+
def _fmt_pct(x):
|
| 966 |
+
return "n/a" if pd.isna(x) else f"{x:.1f}%"
|
| 967 |
|
| 968 |
+
def _fmt_num(x):
|
| 969 |
+
return "n/a" if pd.isna(x) else f"{x:,.2f}"
|
| 970 |
|
| 971 |
+
if rebased.empty or spx_rebased.empty:
|
| 972 |
+
print("No data for interpretation.", file=buf2)
|
| 973 |
+
else:
|
| 974 |
+
as_of = rebased.index[-1].date()
|
| 975 |
+
perf_last = rebased.iloc[-1].dropna()
|
| 976 |
+
spx_last = float(spx_rebased.iloc[-1])
|
| 977 |
+
n_names = len(perf_last)
|
| 978 |
+
eq_avg = float(perf_last.mean())
|
| 979 |
+
eq_med = float(perf_last.median())
|
| 980 |
+
pct_pos = float((perf_last > base).mean() * 100)
|
| 981 |
+
pct_beat = float((perf_last > spx_last).mean() * 100)
|
| 982 |
+
disp_std = float(perf_last.std(ddof=0))
|
| 983 |
+
iqr_lo, iqr_hi = float(perf_last.quantile(0.25)), float(perf_last.quantile(0.75))
|
| 984 |
+
iqr_w = iqr_hi - iqr_lo
|
| 985 |
+
|
| 986 |
+
mag7_in = [t for t in mag7 if t in perf_last.index]
|
| 987 |
+
rest_idx = perf_last.index.difference(mag7_in)
|
| 988 |
+
mag7_mean = float(perf_last[mag7_in].mean()) if len(mag7_in) else np.nan
|
| 989 |
+
rest_mean = float(perf_last[rest_idx].mean()) if len(rest_idx) else np.nan
|
| 990 |
+
mag7_beat = float((perf_last[mag7_in] > spx_last).mean() * 100) if len(mag7_in) else np.nan
|
| 991 |
+
|
| 992 |
+
gains_all = float((perf_last - base).sum())
|
| 993 |
+
topN = 10
|
| 994 |
+
top_contrib = np.nan
|
| 995 |
+
if abs(gains_all) > 1e-9:
|
| 996 |
+
top_contrib = float((perf_last.sort_values(ascending=False).head(topN) - base).sum() / gains_all * 100)
|
| 997 |
+
|
| 998 |
+
rets = rebased.pct_change().replace([np.inf, -np.inf], np.nan).dropna(how="all")
|
| 999 |
+
spx_r = pd.Series(spx_rebased, index=spx_rebased.index).pct_change()
|
| 1000 |
+
corr_to_spx = rets.corrwith(spx_r, axis=0).dropna()
|
| 1001 |
+
corr_med = float(corr_to_spx.median()) if len(corr_to_spx) else np.nan
|
| 1002 |
+
low_corr_share = float((corr_to_spx < 0.3).mean() * 100) if len(corr_to_spx) else np.nan
|
| 1003 |
+
|
| 1004 |
+
spx_chg = spx_last - base
|
| 1005 |
+
k = min(5, n_names)
|
| 1006 |
+
leaders = perf_last.sort_values(ascending=False).head(k)
|
| 1007 |
+
laggards = perf_last.sort_values(ascending=True).head(k)
|
| 1008 |
+
|
| 1009 |
+
print(f"=== Rebased performance read — {as_of} (window: {n_days} sessions) ===\n", file=buf2)
|
| 1010 |
+
print("[Market]", file=buf2)
|
| 1011 |
+
print(f"S&P 500 is {_fmt_pct(spx_chg)} over the window.", file=buf2)
|
| 1012 |
+
print(f"Equal-weight average is {_fmt_pct(eq_avg - base)}, median is {_fmt_pct(eq_med - base)}.", file=buf2)
|
| 1013 |
+
if np.isfinite(eq_avg) and np.isfinite(spx_last):
|
| 1014 |
+
gap = (eq_avg - spx_last)
|
| 1015 |
+
side = "above" if gap >= 0 else "below"
|
| 1016 |
+
print(f"Equal-weight sits {_fmt_pct(abs(gap))} {side} the index.", file=buf2)
|
| 1017 |
+
print("", file=buf2)
|
| 1018 |
+
|
| 1019 |
+
print("[Breadth]", file=buf2)
|
| 1020 |
+
print(f"{_fmt_pct(pct_pos)} of names are up. {_fmt_pct(pct_beat)} beat the index.", file=buf2)
|
| 1021 |
+
print(f"Dispersion std is {_fmt_num(disp_std)} points on the rebased scale.", file=buf2)
|
| 1022 |
+
print(f"IQR width is {_fmt_num(iqr_w)} points ({_fmt_num(iqr_lo)} to {_fmt_num(iqr_hi)}).", file=buf2)
|
| 1023 |
+
if pct_pos >= 70 and pct_beat >= 55:
|
| 1024 |
+
print("Rally is broad. Leadership is shared across many names.", file=buf2)
|
| 1025 |
+
elif pct_pos <= 35 and pct_beat <= 45:
|
| 1026 |
+
print("Rally is narrow or absent. Leadership is concentrated.", file=buf2)
|
| 1027 |
else:
|
| 1028 |
+
print("Breadth is mixed. The tape can rotate quickly.", file=buf2)
|
| 1029 |
+
print("", file=buf2)
|
| 1030 |
+
|
| 1031 |
+
print("[Concentration]", file=buf2)
|
| 1032 |
+
if np.isfinite(top_contrib):
|
| 1033 |
+
print(f"Top {topN} names explain {_fmt_pct(top_contrib)} of equal-weight gains.", file=buf2)
|
| 1034 |
+
if len(mag7_in):
|
| 1035 |
+
print(f"MAG7 equal-weight is {_fmt_pct(mag7_mean - base)}. Rest is {_fmt_pct(rest_mean - base)}.", file=buf2)
|
| 1036 |
+
if np.isfinite(mag7_beat):
|
| 1037 |
+
print(f"{_fmt_pct(mag7_beat)} of MAG7 beat the index.", file=buf2)
|
| 1038 |
+
else:
|
| 1039 |
+
print("MAG7 tickers are not all present in this window.", file=buf2)
|
| 1040 |
+
print("", file=buf2)
|
| 1041 |
+
|
| 1042 |
+
print("[Correlation]", file=buf2)
|
| 1043 |
+
if len(corr_to_spx):
|
| 1044 |
+
print(f"Median correlation to the index is {_fmt_num(corr_med)}.", file=buf2)
|
| 1045 |
+
print(f"{_fmt_pct(low_corr_share)} of names show low correlation (<0.30).", file=buf2)
|
| 1046 |
+
if np.isfinite(corr_med) and corr_med < 0.5:
|
| 1047 |
+
print("Factor dispersion is high. Stock picking matters more.", file=buf2)
|
| 1048 |
+
elif np.isfinite(corr_med) and corr_med > 0.8:
|
| 1049 |
+
print("Common beta dominates. Moves are index-driven.", file=buf2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1050 |
else:
|
| 1051 |
+
print("Correlation sits in a middle zone. Rotation can continue.", file=buf2)
|
| 1052 |
+
else:
|
| 1053 |
+
print("Not enough data to compute correlations.", file=buf2)
|
| 1054 |
+
print("", file=buf2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1055 |
|
| 1056 |
+
print("[Leaders]", file=buf2)
|
| 1057 |
+
for t, v in leaders.items():
|
| 1058 |
+
print(f" {t}: {_fmt_pct(v - base)}", file=buf2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1059 |
|
| 1060 |
+
print("\n[Laggards]", file=buf2)
|
| 1061 |
+
for t, v in laggards.items():
|
| 1062 |
+
print(f" {t}: {_fmt_pct(v - base)}", file=buf2)
|
| 1063 |
|
| 1064 |
+
print("\n[What to monitor]", file=buf2)
|
| 1065 |
+
print("Watch the gap between equal-weight and index. A widening gap signals concentration risk.", file=buf2)
|
| 1066 |
+
print("Track the share beating the index. Sustained readings above 55% support trend durability.", file=buf2)
|
| 1067 |
+
print("Watch median correlation. Falling correlation favors dispersion and relative value setups.", file=buf2)
|
| 1068 |
|
| 1069 |
+
st.text(buf2.getvalue())
|
|
|
|
| 1070 |
|
| 1071 |
+
# ===================== SECTION 3 — Daily Return Heatmap =====================
|
| 1072 |
+
st.header("Daily Return Heatmap")
 
+# Methodology (standalone expander)
+with st.expander("Methodology", expanded=False):
+    st.write("Shows daily % returns for all names over the selected window. Highlights broad up/down days, dispersion, and leadership.")
+    st.write("Use it to spot synchronized moves, stress days, and rotation across the universe.")
+
+    st.write("**Daily return (per name)**")
+    st.latex(r"r_{i,t}=\frac{P_{i,t}}{P_{i,t-1}}-1")
+
+    st.write("**Heatmap values**")
+    st.write("Cells display r_{i,t}. Tickers are sorted by the most recent day’s return so leaders/laggards are obvious.")
+
+    st.write("**Robust color scale (cap extremes)**")
+    st.latex(r"c=\operatorname{P95}\left(\left|r_{i,t}\right|\right)\ \text{over the window}")
+    st.latex(r"\text{color range}=[-c,\,+c],\quad \text{midpoint}=0")
+    st.write("Capping avoids a few outliers overpowering the color scale.")
+
+    st.write("**Breadth and dispersion (how to read)**")
+    st.latex(r"\text{Up share}_t=100\cdot \frac{1}{N}\sum_{i=1}^{N}\mathbf{1}[r_{i,t}>0]")
+    st.latex(r"\sigma_{\text{cs},t}=\operatorname{stdev}\{r_{i,t}\}_{i=1}^{N}")
+    st.write("- High up share with low dispersion = uniform risk-on.")
+    st.write("- Mixed colors with high dispersion = rotation and factor spread.")
+    st.write("- Clusters of red/green by industry often flag sector moves.")
+
+    st.write("**Large-move counts (quick context)**")
+    st.latex(r"\text{BigUp}_t=\sum_{i}\mathbf{1}[r_{i,t}\ge \tau],\quad \text{BigDn}_t=\sum_{i}\mathbf{1}[r_{i,t}\le -\tau]")
+    st.latex(r"\tau=2\% \ \text{(default)}")
+    st.write("A jump in BigUp/BigDn signals a thrust or a shock day.")
+
+    st.write("**Short-horizon follow-through**")
+    st.latex(r"\bar{r}_{i,t}^{(w)}=\frac{1}{w}\sum_{k=0}^{w-1} r_{i,t-k},\quad w=5")
+    st.write("A broad rise in 5-day averages supports continuation; a fade warns of stall.")
+
+    st.write("**Practical reads**")
+    st.write("- Many greens, low dispersion: beta tailwind; index setups work.")
+    st.write("- Greens + high dispersion: stock picking/sector tilts matter.")
+    st.write("- Reds concentrated in a few groups: rotate risk, not necessarily de-risk.")
+    st.write("- Extreme red breadth with spikes in dispersion: watch liquidity and reduce gross.")
+
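The robust color cap and the large-move counts are likewise one-liners. A minimal sketch, assuming `ret` holds the window's daily-return DataFrame (names illustrative):

```python
import numpy as np
import pandas as pd

def robust_color_cap(ret: pd.DataFrame, q: float = 95.0) -> float:
    """c = P95(|r|): a symmetric color bound that ignores outlier tails."""
    return float(np.nanpercentile(np.abs(ret.values), q))

def big_move_counts(ret: pd.DataFrame, tau: float = 0.02) -> pd.DataFrame:
    """BigUp_t / BigDn_t: names moving at least +/- tau on each day."""
    return pd.DataFrame({
        "big_up": (ret >= tau).sum(axis=1),
        "big_dn": (ret <= -tau).sum(axis=1),
    })
```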
| 1112 |
+
# Daily returns last N days
ret_daily = clean_close.pct_change().iloc[1:]
ret_window = int(heat_last_days)
ret_last = ret_daily.iloc[-ret_window:]
if ret_last.empty:
    st.warning("Not enough data for the daily return heatmap.")
else:
    # Sort tickers by the most recent day's return so leaders/laggards group together
    order = ret_last.iloc[-1].sort_values(ascending=True).index
    ret_last = ret_last[order]

    # Robust color cap: 95th percentile of |returns| over the window
    abs_max = np.nanpercentile(np.abs(ret_last.values), 95)
    z = ret_last.T.values
    x = ret_last.index
    y = list(order)

    # Show roughly ten evenly spaced date ticks
    n_dates = len(x)
    step = max(1, n_dates // 10)
    xtick_vals = x[::step]
    xtick_texts = [ts.strftime("%Y-%m-%d") for ts in xtick_vals]

    fig_hm = go.Figure(go.Heatmap(
        z=z, x=x, y=y,
        colorscale="RdYlGn",
        zmin=-abs_max, zmax=abs_max, zmid=0,
        colorbar=dict(title="Daily Return", tickformat=".0%"),
        hovertemplate="%{y}<br>%{x|%Y-%m-%d}<br>%{z:.2%}<extra></extra>"
    ))

    height = max(800, min(3200, 18 * len(y)))
    fig_hm.update_layout(
        template="plotly_dark",
        title=f"Last {ret_window}-Day Daily Return Heatmap",
        height=height,
        margin=dict(l=100, r=40, t=60, b=60),
        font=dict(color="white")
    )
    fig_hm.update_yaxes(title="Tickers (sorted by latest daily return)", tickfont=dict(size=8))
    fig_hm.update_xaxes(title="Date", tickmode="array", tickvals=xtick_vals, ticktext=xtick_texts, tickangle=45)
    st.plotly_chart(fig_hm, use_container_width=True)
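
# --- Illustrative sketch (not called by the app) ----------------------------------
# The Methodology above defines a w-day follow-through average; this is a minimal,
# hedged sketch of the full time-series version (the Dynamic Interpretation below
# reports only the latest value). The helper name and its arguments are hypothetical;
# pass a dates-x-tickers frame of daily returns such as `ret_last`.
def _follow_through_share_sketch(daily_rets, w=5):
    """Share of names (%) whose trailing w-day mean return is positive, per day."""
    avg_w = daily_rets.rolling(w).mean()         # w-day average return per name
    share = 100.0 * (avg_w > 0).mean(axis=1)     # breadth of positive w-day averages
    return share.iloc[w - 1:]                    # first w-1 rows have no full window
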
# Dynamic Interpretation (standalone expander)
with st.expander("Dynamic Interpretation", expanded=False):
    buf3 = io.StringIO()

    def _pct(x):
        return "n/a" if pd.isna(x) else f"{x*100:.1f}%"

    def _pp(x):
        return "n/a" if pd.isna(x) else f"{x*100:.2f}%"

    if ret_last.empty:
        print("No data for interpretation.", file=buf3)
    else:
        as_of = ret_last.index[-1].date()
        last = ret_last.iloc[-1]
        N = last.shape[0]
        up = int((last > 0).sum())
        dn = int((last < 0).sum())
        flat = int(N - up - dn)
        mean = float(last.mean()); med = float(last.median())
        std = float(last.std(ddof=0))
        q25 = float(last.quantile(0.25)); q75 = float(last.quantile(0.75))
        iqr = q75 - q25
        thr = 0.02
        big_up = int((last >= thr).sum())
        big_dn = int((last <= -thr).sum())
        w = min(5, len(ret_last))
        avg_w = ret_last.tail(w).mean()
        pct_pos_w = float((avg_w > 0).mean())
        cs_std = ret_last.std(axis=1, ddof=0)
        today_std = float(cs_std.iloc[-1])
        disp_pct = float((cs_std <= today_std).mean())
        k = min(10, N)
        leaders = last.sort_values(ascending=False).head(k)
        laggards = last.sort_values(ascending=True).head(k)

        def _streak(s, max_look=20):
            # Consecutive same-sign daily returns ending today: +k for up, -k for down
            v = s.tail(max_look).to_numpy(dtype=float)
            sign = np.sign(v); sign[np.isnan(sign)] = 0
            if len(sign) == 0 or sign[-1] == 0:
                return 0
            tgt = sign[-1]; cnt = 0
            for x in sign[::-1]:
                if x == tgt:
                    cnt += 1
                else:
                    break
            return int(cnt if tgt > 0 else -cnt)

        streaks = {t: _streak(ret_last[t]) for t in set(leaders.index).union(laggards.index)}

        print(f"=== Daily return heatmap read — {as_of} (last {len(ret_last)} sessions) ===", file=buf3)
        print("\n[Today]", file=buf3)
        print(f"Up: {up}/{N} ({_pct(up/N)}). Down: {dn}/{N} ({_pct(dn/N)}). Flat: {flat}.", file=buf3)
        print(f"Mean: {_pp(mean)}. Median: {_pp(med)}. Std: {_pp(std)}. IQR: {_pp(iqr)}.", file=buf3)
        print(f"Moves ≥ {int(thr*100)}%: +{big_up}. Moves ≤ -{int(thr*100)}%: {big_dn}.", file=buf3)

        print("\n[Recent breadth]", file=buf3)
        print(f"{_pct(pct_pos_w)} of names have a positive average over the last {w} sessions.", file=buf3)

        print("\n[Dispersion]", file=buf3)
        print(f"Cross-section std today: {_pp(today_std)} (window percentile ~{disp_pct*100:.0f}th).", file=buf3)

        print("\n[Leaders today]", file=buf3)
        for t, v in leaders.items():
            stv = streaks.get(t, 0)
            lab = ("flat" if stv == 0 else (f"{stv}d up" if stv > 0 else f"{-stv}d down"))
            print(f" {t}: {_pp(v)} ({lab})", file=buf3)

        print("\n[Laggards today]", file=buf3)
        for t, v in laggards.items():
            stv = streaks.get(t, 0)
            lab = ("flat" if stv == 0 else (f"{stv}d up" if stv > 0 else f"{-stv}d down"))
            print(f" {t}: {_pp(v)} ({lab})", file=buf3)

        print("\n[What to monitor]", file=buf3)
        print("Watch big-move counts and the 5-day positive share for follow-through.", file=buf3)
        print("Track dispersion; elevated dispersion favors relative moves over index moves.", file=buf3)

    st.text(buf3.getvalue())
# ===================== SECTION 4 — Percentile Momentum Heatmap =====================
st.header("Percentile Momentum Heatmap")

# Methodology (standalone expander)
with st.expander("Methodology", expanded=False):
    st.write("Ranks each stock’s medium-horizon return against the cross-section each day.")
    st.write("Use it to spot broad momentum, rotation, and persistence.")

    st.write("**n-day return (per name)**")
    st.latex(r"r^{(n)}_{i,t}=\frac{P_{i,t}}{P_{i,t-n}}-1")

    st.write("**Cross-sectional percentile (per day)**")
    st.latex(r"p_{i,t}=\frac{\operatorname{rank}\!\left(r^{(n)}_{i,t}\right)}{N}")
    st.write("0 means worst in the universe that day; 1 means best.")
    st.write("The heatmap shows p_{i,t}. Rows are sorted by the latest percentile.")

    st.write("**Breadth buckets (how to read)**")
    st.latex(r"\text{Top\,20\%}_t=\frac{1}{N}\sum_{i}\mathbf{1}[p_{i,t}\ge 0.80]")
    st.latex(r"\text{Bottom\,20\%}_t=\frac{1}{N}\sum_{i}\mathbf{1}[p_{i,t}\le 0.20]")
    st.write("A high Top-20% share signals broad upside momentum; a high Bottom-20% share signals broad weakness.")

    st.write("**Momentum shift vs a short lookback**")
    st.latex(r"\Delta p_i=p_{i,T}-p_{i,T-w}")
    st.write("Improving names: Δp_i > 0. Weakening names: Δp_i < 0.")

    st.write("**Persistence (top/bottom quintile)**")
    st.latex(r"\text{TopQ}_{i}=\sum_{k=0}^{w-1}\mathbf{1}[p_{i,T-k}\ge 0.80]")
    st.latex(r"\text{BotQ}_{i}=\sum_{k=0}^{w-1}\mathbf{1}[p_{i,T-k}\le 0.20]")
    st.write("Names with TopQ = w held leadership for the full window; names with BotQ = w stayed weak throughout.")

    st.write("**Practical reads**")
    st.write("- Rising median percentile and high Top-20% share: trend has breadth.")
    st.write("- Mixed median with both tails active: rotation/dispersion regime.")
    st.write("- Persistent top-quintile list: candidates for follow-through.")
    st.write("- Persistent bottom-quintile list: candidates for mean-reversion checks.")
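
# --- Illustrative sketch (not called by the app) ----------------------------------
# A minimal sketch of the quantities defined in the Methodology above: the daily
# cross-sectional percentile p_{i,t}, the Top/Bottom-20% breadth shares, and the shift
# Δp versus w sessions back. The helper name and arguments are hypothetical; pass an
# n-day return frame such as the `ret_n` built just below, with more than w rows.
def _momentum_percentile_sketch(ndays_rets, w=5):
    """Return (percentiles, top20 share %, bottom20 share %, delta_p vs w rows back)."""
    perc_s = ndays_rets.rank(axis=1, pct=True)       # p_{i,t}: 0 = worst, 1 = best that day
    top20 = 100.0 * (perc_s >= 0.80).mean(axis=1)    # share of names in the top quintile
    bot20 = 100.0 * (perc_s <= 0.20).mean(axis=1)    # share of names in the bottom quintile
    delta_p = perc_s.iloc[-1] - perc_s.iloc[-1 - w]  # Δp_i = p_{i,T} - p_{i,T-w}
    return perc_s, top20, bot20, delta_p
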
look_days = int(mom_look)
ret_n = clean_close.pct_change(look_days)
ret_n = ret_n.iloc[look_days:]
if ret_n.empty:
    st.warning("Not enough data for the momentum heatmap.")
else:
    # Cross-sectional percentile rank of the n-day return, computed per day
    perc = ret_n.rank(axis=1, pct=True)
    order2 = perc.iloc[-1].sort_values(ascending=True).index
    perc = perc[order2]

    z = perc.T.values
    x = perc.index
    y = list(order2)

    # Show roughly ten evenly spaced date ticks
    n_dates = len(x)
    step = max(1, n_dates // 10)
    xtick_vals = x[::step]
    xtick_texts = [ts.strftime("%Y-%m-%d") for ts in xtick_vals]

    fig_pm = go.Figure(go.Heatmap(
        z=z, x=x, y=y,
        colorscale="Viridis",
        zmin=0, zmax=1,
        colorbar=dict(title="Return Percentile"),
        hovertemplate="%{y}<br>%{x|%Y-%m-%d}<br>%{z:.0%}<extra></extra>"
    ))

    height = max(800, min(3200, 18 * len(y)))
    fig_pm.update_layout(
        template="plotly_dark",
        title=f"{look_days}-Day Return Percentile Heatmap",
        height=height,
        margin=dict(l=110, r=40, t=60, b=60),
        font=dict(color="white")
    )
    fig_pm.update_yaxes(title="Tickers (sorted by latest %ile)", tickfont=dict(size=8))
    fig_pm.update_xaxes(title="Date", tickmode="array", tickvals=xtick_vals, ticktext=xtick_texts, tickangle=45)
    st.plotly_chart(fig_pm, use_container_width=True)
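
# --- Illustrative worked example (toy numbers, not app data) -----------------------
# How rank(axis=1, pct=True) maps one day's cross-section of returns to percentiles;
# the tickers and returns here are made up purely for illustration.
def _percentile_rank_example():
    import pandas as pd  # local import keeps this sketch self-contained
    toy = pd.DataFrame({"AAA": [-0.01], "BBB": [0.02], "CCC": [0.05]})
    # ranks 1, 2, 3 out of 3 names -> percentiles 0.33, 0.67, 1.00
    return toy.rank(axis=1, pct=True)
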
# Dynamic Interpretation (standalone expander)
with st.expander("Dynamic Interpretation", expanded=False):
    buf4 = io.StringIO()
    # Check ret_n first: if it is empty, perc was never defined above
    if ret_n.empty or perc.empty:
        print("No data for interpretation.", file=buf4)
    else:
        as_of = perc.index[-1].date()
        last_p = perc.iloc[-1].astype(float)
        last_r = ret_n.iloc[-1].astype(float)

        N = int(last_p.shape[0])
        mean_p = float(last_p.mean()); med_p = float(last_p.median())
        q25 = float(last_p.quantile(0.25)); q75 = float(last_p.quantile(0.75))
        iqr_w = q75 - q25

        top10 = float((last_p >= 0.90).mean() * 100)
        top20 = float((last_p >= 0.80).mean() * 100)
        mid40 = float(((last_p > 0.40) & (last_p < 0.60)).mean() * 100)
        bot20 = float((last_p <= 0.20).mean() * 100)
        bot10 = float((last_p <= 0.10).mean() * 100)

        pct_up = float((last_r > 0).mean() * 100)

        look = min(5, len(perc))
        delta = (last_p - perc.iloc[-look].astype(float)).dropna()
        improving = float((delta > 0).mean() * 100)
        weakening = float((delta < 0).mean() * 100)
        delta_med = float(delta.median())

        k = min(10, N)
        leaders = last_p.sort_values(ascending=False).head(k)
        laggards = last_p.sort_values(ascending=True).head(k)

        # Persistence: sessions spent in the top/bottom quintile over the last window_p days
        window_p = 5
        top_quint = (perc.tail(window_p) >= 0.80).sum()
        bot_quint = (perc.tail(window_p) <= 0.20).sum()
        persistent_up = top_quint[top_quint == window_p].index.tolist()
        persistent_dn = bot_quint[bot_quint == window_p].index.tolist()

        print(f"=== {look_days}-day momentum read — {as_of} ===", file=buf4)
        print("\n[Snapshot]", file=buf4)
        print(f"Names: {N}. Up on window: {pct_up:.1f}%.", file=buf4)
        print(f"Mean percentile: {mean_p:.2f}. Median: {med_p:.2f}.", file=buf4)
        print(f"IQR: {q25:.2f}–{q75:.2f} (width {iqr_w:.2f}).", file=buf4)

        print("\n[Breadth]", file=buf4)
        print(f"Top 10%: {top10:.1f}%. Top 20%: {top20:.1f}%.", file=buf4)
        print(f"Middle 40–60%: {mid40:.1f}%.", file=buf4)
        print(f"Bottom 20%: {bot20:.1f}%. Bottom 10%: {bot10:.1f}%.", file=buf4)

        print("\n[Shift]", file=buf4)
        print(f"Improving vs {look} days ago: {improving:.1f}%. Weakening: {weakening:.1f}%.", file=buf4)
        print(f"Median percentile change: {delta_med:+.2f}.", file=buf4)

        print("\n[Leaders]", file=buf4)
        for t, v in leaders.items():
            print(f" {t}: {v:.2f}", file=buf4)

        print("\n[Laggards]", file=buf4)
        for t, v in laggards.items():
            print(f" {t}: {v:.2f}", file=buf4)

        print("\n[Persistence]", file=buf4)
        if persistent_up:
            up_list = ", ".join(persistent_up[:15]) + ("…" if len(persistent_up) > 15 else "")
            print(f"Top-quintile {window_p} days: {up_list}", file=buf4)
        else:
            print("No names stayed in the top quintile.", file=buf4)
        if persistent_dn:
            dn_list = ", ".join(persistent_dn[:15]) + ("…" if len(persistent_dn) > 15 else "")
            print(f"Bottom-quintile {window_p} days: {dn_list}", file=buf4)
        else:
            print("No names stayed in the bottom quintile.", file=buf4)

        print("\n[Focus]", file=buf4)
        print("Watch the top-quintile share. Rising share supports continuation.", file=buf4)
        print("Track the median percentile. Sustained readings above 0.60 show broad momentum.", file=buf4)
        print("Use persistence lists for follow-through and mean-reversion checks.", file=buf4)

    st.text(buf4.getvalue())
# Hide default Streamlit style
st.markdown(