|
|
import numpy as np
|
|
|
import pandas as pd
|
|
|
import streamlit as st
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def normalize_probs(p: np.ndarray) -> np.ndarray:
    """Coerce *p* into a valid probability vector.

    Negative entries and NaNs are treated as zero mass. If no positive mass
    remains, a uniform distribution is returned so callers always receive a
    usable distribution. An empty input is returned unchanged.

    Args:
        p: array-like of (intended) nonnegative weights; need not sum to 1.

    Returns:
        Float array of the same shape summing to 1 (or empty if *p* is empty).
    """
    p = np.asarray(p, dtype=float)
    if p.size == 0:
        # Nothing to normalize; avoids 1/0 in the uniform fallback below.
        return p
    # Bug fix: NaNs previously survived np.clip, poisoned the sum, and made
    # the function return an all-NaN "distribution". Treat NaN as zero mass.
    p = np.where(np.isnan(p), 0.0, p)
    p = np.clip(p, 0.0, None)
    s = float(p.sum())
    if s <= 0:
        # All mass clipped away (all zero/negative input): fall back to uniform.
        return np.ones_like(p) / len(p)
    return p / s
|
|
|
|
|
|
|
|
|
def expected_loss(loss: np.ndarray, p: np.ndarray) -> np.ndarray:
    """Probability-weighted average loss for each action.

    Args:
        loss: (actions, scenarios) loss matrix.
        p: scenario probability vector of length `scenarios`.

    Returns:
        Vector of length `actions`: each action's expected loss under *p*.
    """
    return np.matmul(loss, p)
|
|
|
|
|
|
|
|
|
def regret_matrix(loss: np.ndarray) -> np.ndarray:
    """Per-cell regret: each loss minus the best loss in its scenario.

    Row = action, column = scenario; the scenario-wise minimum (the loss of
    the best action in hindsight) is subtracted from every column.
    """
    best_per_scenario = loss.min(axis=0, keepdims=True)
    return loss - best_per_scenario
|
|
|
|
|
|
|
|
|
def max_regret(regret: np.ndarray) -> np.ndarray:
    """Worst-case (maximum over scenarios) regret for each action row."""
    worst = np.amax(regret, axis=1)
    return worst
|
|
|
|
|
|
|
|
|
def cvar_discrete(losses: np.ndarray, probs: np.ndarray, alpha: float = 0.8) -> float:
    """Conditional Value-at-Risk at level *alpha* for discrete outcomes.

    Procedure:
    1. Sort outcomes by ascending loss.
    2. Normalize the probabilities (negatives clipped; uniform fallback
       if total mass is not positive).
    3. Keep the outcomes whose cumulative mass reaches *alpha* — i.e. the
       worst ~(1 - alpha) probability tail.
    4. Return the probability-weighted mean loss over that tail.
    """
    level = min(max(float(alpha), 0.0), 1.0)

    # Arrange outcomes from best (lowest loss) to worst.
    order = np.argsort(losses)
    sorted_loss = np.asarray(losses, dtype=float)[order]
    sorted_p = np.asarray(probs, dtype=float)[order]

    # Inline normalization (same contract as normalize_probs): clip negative
    # mass, renormalize, uniform fallback when nothing positive remains.
    sorted_p = np.clip(sorted_p, 0.0, None)
    total = float(sorted_p.sum())
    if total <= 0:
        sorted_p = np.full_like(sorted_p, 1.0 / len(sorted_p))
    else:
        sorted_p = sorted_p / total

    cdf = np.cumsum(sorted_p)

    in_tail = cdf >= level
    if not np.any(in_tail):
        # Floating-point rounding can leave cdf[-1] a hair below alpha;
        # always keep at least the single worst outcome in the tail.
        in_tail[-1] = True

    tail_mass = sorted_p[in_tail].sum()
    if tail_mass <= 0:
        # Degenerate tail: report the worst loss outright.
        return float(sorted_loss[-1])

    return float(np.dot(sorted_loss[in_tail], sorted_p[in_tail]) / tail_mass)
|
|
|
|
|
|
|
|
|
def cvar_per_action(loss: np.ndarray, p: np.ndarray, alpha: float) -> np.ndarray:
    """CVaR@alpha of each action's loss row under scenario probabilities *p*."""
    per_row = (cvar_discrete(row, p, alpha=alpha) for row in loss)
    return np.fromiter(per_row, dtype=float)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Page setup -------------------------------------------------------------
st.set_page_config(page_title="Decision Kernel Lite", layout="wide")
# Widen the sidebar beyond Streamlit's default so the rule-guidance text fits.
st.markdown(
    """
    <style>
    section[data-testid="stSidebar"] {
        width: 420px !important;
    }
    section[data-testid="stSidebar"] > div {
        width: 420px !important;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
|
|
|
|
|
|
# Page heading: one decision output, viewed through three risk lenses.
st.title("Decision Kernel Lite")
st.caption("One output: choose an action under uncertainty. Three lenses: Expected Loss, Regret, CVaR.")
|
|
|
|
|
|
|
|
|
# --- Default inputs (all editable in the UI below) --------------------------
default_actions = ["A1", "A2", "A3"]
default_scenarios = ["Low", "Medium", "High"]
default_probs = [0.3, 0.4, 0.3]
# Rows = actions, columns = scenarios; entries are losses (lower is better).
default_loss = np.array([[10, 5, 1], [6, 4, 6], [2, 6, 12]])
|
|
|
|
|
|
# --- Sidebar controls -------------------------------------------------------
st.sidebar.header("Controls")
# alpha is the CVaR tail threshold: the worst ~(1 - alpha) mass is averaged.
alpha = st.sidebar.slider("CVaR alpha (tail threshold)", 0.50, 0.99, 0.80, 0.01)
tie_policy = st.sidebar.selectbox("Tie policy", ["First", "Show all"], index=1)

st.sidebar.header("Decision rule")
# primary_rule drives which metric picks the winning action further below.
primary_rule = st.sidebar.radio("Choose action by", ["Expected Loss", "Minimax Regret", "CVaR"], index=0)
|
|
|
|
|
|
|
|
|
# Two-column input area: scenarios/probabilities left, actions/losses right.
left, right = st.columns([1.2, 1])

with left:
    st.subheader("1) Define scenarios + probabilities")
    scen_df = pd.DataFrame({"Scenario": default_scenarios, "Probability": default_probs})
    scen_df = st.data_editor(scen_df, num_rows="dynamic", use_container_width=True)

    # Drop rows with missing or blank/whitespace-only scenario names that
    # the dynamic editor may have introduced.
    scen_df = scen_df.dropna(subset=["Scenario"]).copy()
    scen_df["Scenario"] = scen_df["Scenario"].astype(str).str.strip()
    scen_df = scen_df[scen_df["Scenario"] != ""]
    if scen_df.empty:
        st.error("Add at least one scenario.")
        st.stop()

    scenarios = scen_df["Scenario"].tolist()
    probs_raw = scen_df["Probability"].fillna(0.0).astype(float).to_numpy()
    # Renormalize so downstream math always sees a valid distribution, even
    # when the user's entries do not sum to 1.
    probs = normalize_probs(probs_raw)

    # Tell the user when their raw entries were rescaled.
    if not np.isclose(probs_raw.sum(), 1.0):
        st.info(f"Probabilities normalized to sum to 1.0 (raw sum was {probs_raw.sum():.3f}).")
|
|
with right:
    st.subheader("2) Define actions + losses")

    loss_df = pd.DataFrame(default_loss, index=default_actions, columns=default_scenarios)

    # Align the loss table's columns with the (possibly edited) scenario
    # list; scenarios not present in the defaults come in as NaN columns.
    loss_df = loss_df.reindex(columns=scenarios)
    # NOTE(review): after reindex(columns=scenarios), every scenario is
    # already a column, so this loop never fires — the NaNs it was meant to
    # zero are instead filled by fillna(0.0) on loss_vals below.
    for c in scenarios:
        if c not in loss_df.columns:
            loss_df[c] = 0.0
    loss_df = loss_df[scenarios]

    # Expose the action names (index) as an editable "Action" column.
    loss_df = st.data_editor(
        loss_df.reset_index().rename(columns={"index": "Action"}),
        num_rows="dynamic",
        use_container_width=True,
    )

    # Drop rows with missing or blank/whitespace-only action names.
    loss_df = loss_df.dropna(subset=["Action"]).copy()
    loss_df["Action"] = loss_df["Action"].astype(str).str.strip()
    loss_df = loss_df[loss_df["Action"] != ""]
    if loss_df.empty:
        st.error("Add at least one action.")
        st.stop()

    actions = loss_df["Action"].tolist()
    # Missing loss cells default to 0.0 (see NOTE above).
    loss_vals = loss_df.drop(columns=["Action"]).fillna(0.0).astype(float).to_numpy()
|
|
|
|
|
|
|
|
|
# --- Compute all three lenses on the (actions x scenarios) loss matrix ------
loss_mat = loss_vals
A, S = loss_mat.shape  # A = number of actions, S = number of scenarios

exp = expected_loss(loss_mat, probs)          # risk-neutral average per action
reg = regret_matrix(loss_mat)                 # regret per action x scenario
mxr = max_regret(reg)                         # worst-case regret per action
cvar = cvar_per_action(loss_mat, probs, alpha=alpha)  # tail loss per action

# One row per action; the CVaR column name embeds the chosen alpha.
results = pd.DataFrame(
    {
        "Expected Loss": exp,
        "Max Regret": mxr,
        f"CVaR@{alpha:.2f}": cvar,
    },
    index=actions,
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Heuristic rule recommendation: compare the best attainable tail loss
# (CVaR) with the best attainable average loss. The 1e-9 floor guards
# against division by zero when the best expected loss is ~0.
tail_ratio = float(results[f"CVaR@{alpha:.2f}"].min() / max(results["Expected Loss"].min(), 1e-9))

# Threshold 1.5: if the tail is at least 50% worse than the average,
# recommend optimizing the tail (CVaR) instead of the mean.
if tail_ratio >= 1.5:
    rule_reco = "CVaR"
    rule_reason = f"Tail risk dominates average (best CVaR / best Expected Loss = {tail_ratio:.2f})."
else:
    rule_reco = "Expected Loss"
    rule_reason = f"Tail risk is not extreme (ratio = {tail_ratio:.2f}); average-optimal is defensible."

# Optionally override the user's chosen rule with the heuristic's pick.
use_rule_reco = st.sidebar.checkbox("Use recommended rule (heuristic)", value=False)
if use_rule_reco:
    primary_rule = rule_reco
|
|
|
|
|
|
|
|
|
|
|
|
# --- Pick the winning action(s) under the primary rule (ties possible) ------
if primary_rule == "Expected Loss":
    metric = results["Expected Loss"]
    best_val = metric.min()
    best_actions = metric[metric == best_val].index.tolist()
elif primary_rule == "Minimax Regret":
    metric = results["Max Regret"]
    best_val = metric.min()
    best_actions = metric[metric == best_val].index.tolist()
else:
    # Remaining option is CVaR; column name embeds the current alpha.
    col = f"CVaR@{alpha:.2f}"
    metric = results[col]
    best_val = metric.min()
    best_actions = metric[metric == best_val].index.tolist()

# "First" breaks ties by table order; "Show all" lists every tied action.
chosen = best_actions[0] if tie_policy == "First" else ", ".join(best_actions)
|
|
|
# Static sidebar reference: when each decision rule is appropriate.
st.sidebar.header("Rule guidance (when to use what)")

st.sidebar.markdown(
    """
**Expected Loss (risk-neutral)**
- Use when decisions repeat frequently and you can tolerate variance.
- Use when probabilities are reasonably trusted.
- Optimizes *average* pain.

**Minimax Regret (robust to bad probability estimates)**
- Use when probabilities are unreliable or politically contested.
- Use for one-shot / high-accountability decisions.
- Minimizes “I should have done X” exposure.

**CVaR (tail-risk protection)**
- Use when rare bad outcomes are unacceptable (ruin / safety / bankruptcy).
- Use when downside is asymmetric and must be bounded.
- Optimizes the *average of worst cases* (tail), not the average overall.
"""
)
|
|
|
|
|
|
|
|
|
# --- Main output: headline decision plus the supporting tables --------------
st.divider()
topL, topR = st.columns([2, 1], vertical_alignment="center")
with topL:
    st.subheader("Decision")
    st.markdown(f"### Choose **{chosen}**")
    st.caption(f"Primary rule: **{primary_rule}**")
with topR:
    # Quick size metrics for the current problem.
    st.metric("Scenarios", S)
    st.metric("Actions", A)

# All three lenses side by side, one row per action.
st.subheader("Evidence table")
st.dataframe(results.style.format("{:.3f}"), use_container_width=True)

# Full regret breakdown behind the "Max Regret" column above.
st.subheader("Regret table (per action × scenario)")
reg_df = pd.DataFrame(reg, index=actions, columns=scenarios)
st.dataframe(reg_df.style.format("{:.3f}"), use_container_width=True)
|
|
|
|
|
|
|
|
|
# --- Decision Card: copy-pastable plain-text summary of the whole run -------
st.subheader("Decision Card")
st.info(f"Recommended rule (heuristic): **{rule_reco}** — {rule_reason}")

# Human-readable "Scenario=prob" pairs for the card's context section.
prob_str = ", ".join([f"{s}={p:.2f}" for s, p in zip(scenarios, probs)])

# Per-lens winners (first index on ties, per pandas idxmin).
exp_best = results["Expected Loss"].idxmin()
mxr_best = results["Max Regret"].idxmin()
cvar_best = results[f"CVaR@{alpha:.2f}"].idxmin()

st.code(
    f"""DECISION KERNEL LITE — DECISION CARD

Decision:
Choose action {chosen}

Context:
- Actions evaluated: {", ".join(actions)}
- Scenarios considered: {", ".join(scenarios)}
- Probabilities: {prob_str}

Results:
- Expected Loss optimal: {exp_best} ({results.loc[exp_best, "Expected Loss"]:.3f})
- Minimax Regret optimal: {mxr_best} ({results.loc[mxr_best, "Max Regret"]:.3f})
- CVaR@{alpha:.2f} optimal: {cvar_best} ({results.loc[cvar_best, f"CVaR@{alpha:.2f}"]:.3f})

Rule guidance:
- Expected Loss: repeated decisions + trusted probabilities
- Minimax Regret: probabilities unreliable + high accountability
- CVaR: tail-risk unacceptable / ruin protection

Recommended rule (heuristic): {rule_reco} — {rule_reason}


Primary rule used: {primary_rule}
""",
    language="text",
)
|
|
|
|
|
|
# Debug view: the normalized probabilities and loss matrix actually used.
with st.expander("Raw inputs"):
    st.write("Probabilities (normalized):", probs)
    st.dataframe(pd.DataFrame(loss_mat, index=actions, columns=scenarios), use_container_width=True)
|
|
|
|