"""Streamlit app: RAT-Bench leaderboard for text-anonymization tools.

Renders a hero banner, summary metric cards, a sortable leaderboard table,
an explanation of the re-identification risk metric, an interactive
risk-vs-BLEU Altair scatter plot, and a BibTeX citation block.

NOTE(review): this file was recovered from a whitespace-mangled copy in which
the HTML payloads of the ``st.markdown(..., unsafe_allow_html=True)`` calls
had their tags stripped (only the text content survived).  The Python logic
below is faithful to the recovered source; the HTML wrappers and the CSS
block are minimal reconstructions and should be confirmed against the
original styling.
"""

import os
from base64 import b64encode
from pathlib import Path

import altair as alt
import pandas as pd
import streamlit as st
from datasets import load_dataset

# --- Page setup ---
st.set_page_config(
    page_title="RAT-Bench Leaderboard",
    page_icon="📊",  # fixed mojibake ("πŸ“Š") in the recovered copy
    layout="centered",
)

# --- Global CSS ---
# NOTE(review): the CSS rules were lost when the HTML/markup was stripped
# from the recovered copy; restore the original stylesheet here.
st.markdown("""
<style>
</style>
""", unsafe_allow_html=True)

# ╔══════════════════════════════════════════════════════════════╗
# ║  1. Hero Banner                                              ║
# ╚══════════════════════════════════════════════════════════════╝
# Embed the Rat_Bench logo as base64 so it renders without a static-file route.
with open(Path(__file__).parent / "images" / "Rat_Bench.png", "rb") as f:
    logo_b64 = b64encode(f.read()).decode("utf-8")

# NOTE(review): hero markup reconstructed — original tags were stripped.
st.markdown(f"""
<div class="hero">
  <img src="data:image/png;base64,{logo_b64}" alt="RAT-Bench logo" />
  <h1>RAT-Bench: A Comprehensive Benchmark for Text Anonymization</h1>
  <p>
    RAT-Bench is a synthetic benchmark for evaluating how well anonymization
    tools prevent re-identification of individuals in text.
    Using U.S. demographic statistics, we generate text with direct and
    indirect identifiers, anonymize it, and measure how easily an LLM-based
    attacker can still re-identify people.
  </p>
  <p>
    Curious how your tool compares? Follow the instructions in our repo and
    send us your results!
  </p>
</div>
""", unsafe_allow_html=True)

# ╔══════════════════════════════════════════════════════════════╗
# ║  Load Data                                                   ║
# ╚══════════════════════════════════════════════════════════════╝
@st.cache_data
def load_results() -> pd.DataFrame:
    """Fetch the RAT-Bench results dataset from the HF Hub as a DataFrame.

    Cached by Streamlit so the Hub is hit once per session/app restart.
    ``HF_TOKEN`` is read from the environment (presumably the dataset repo
    is gated/private — TODO confirm).
    """
    ds = load_dataset(
        "imperial-cpg/rat-bench-results",
        split="train",
        token=os.environ.get("HF_TOKEN"),
    )
    return ds.to_pandas()

df = load_results()

# ╔══════════════════════════════════════════════════════════════╗
# ║  2. Metric Summary Cards                                     ║
# ╚══════════════════════════════════════════════════════════════╝
tool_col = "Anonymization tool"
baseline_name = "No anonymization"

# Baseline row(s) are excluded from all rankings; match case/whitespace-insensitively.
non_baseline = df[df[tool_col].str.strip().str.lower() != baseline_name.lower()]
num_tools = len(non_baseline)
best_tool = non_baseline.loc[non_baseline["English Avg"].idxmin(), tool_col]
languages = ["English", "Spanish", "Simplified Chinese"]
num_langs = len(languages)

# Best risk-BLEU tradeoff: lowest risk among tools with above-median BLEU
bleu_col_src = "BLEU score (English, Explicit avg)"
with_bleu = non_baseline.dropna(subset=[bleu_col_src, "English Avg"])
median_bleu = with_bleu[bleu_col_src].median()
good_bleu = with_bleu[with_bleu[bleu_col_src] >= median_bleu]
best_tradeoff = good_bleu.loc[good_bleu["English Avg"].idxmin(), tool_col]

# NOTE(review): card markup reconstructed — original tags were stripped.
st.markdown(f"""
<div class="cards">
  <div class="card"><div class="card-label">Tools Evaluated</div><div class="card-value">{num_tools}</div></div>
  <div class="card"><div class="card-label">Lowest Avg Risk (EN)</div><div class="card-value">{best_tool}</div></div>
  <div class="card"><div class="card-label">Best Risk-BLEU Tradeoff</div><div class="card-value">{best_tradeoff}</div></div>
  <div class="card"><div class="card-label">Languages</div><div class="card-value">{num_langs}</div></div>
</div>
""", unsafe_allow_html=True)

# ╔══════════════════════════════════════════════════════════════╗
# ║  3. Leaderboard Table                                        ║
# ╚══════════════════════════════════════════════════════════════╝
st.markdown('<div class="section-title">Leaderboard</div>', unsafe_allow_html=True)
st.caption(
    "<div class='caption'>Toggle which results to display. "
    "The <b>No anonymization</b> baseline is pinned on top (not ranked). "
    "Tools are ranked by <b>Average Risk</b> (lower is better).</div>",
    unsafe_allow_html=True,
)

# Centered two-column control strip (outer columns are spacers).
_, c1, c2, _ = st.columns([1, 2, 2, 1], gap="medium")
with c1:
    language = st.selectbox("Language", languages)
with c2:
    st.write("")  # vertical spacer
    show_levels = st.checkbox("Show difficulty levels", value=True, key="levels_cb")

# --- Build display table ---
work = df.copy()
work["Average Risk (Explicit)"] = work[f"{language} Avg"]
work = work.dropna(subset=[f"{language} Avg"])
baseline_mask = work[tool_col].str.strip().str.lower() == baseline_name.lower()
# Rank only the non-baseline tools by ascending risk; pin baseline on top unranked.
others = work[~baseline_mask].sort_values(f"{language} Avg").reset_index(drop=True)
others["Rank"] = (others.index + 1).astype(str)
baselines = work[baseline_mask].copy()
baselines["Rank"] = "—"
final = pd.concat([baselines, others], ignore_index=True)

cols = ["Rank", tool_col, "Type"]
if not show_levels:
    cols += ["Average Risk (Explicit)"]
elif language == "English":
    # English has easy/hard explicit splits plus an implicit-identifier track.
    cols += [
        f"{language} Explicit (easy)",
        f"{language} Explicit (hard)",
        "Average Risk (Explicit)",
        f"{language} Implicit",
    ]
else:
    cols += [f"{language} Explicit (easy)", "Average Risk (Explicit)"]
if language == "English":
    # BLEU is only reported for English.
    cols += [f"BLEU score ({language}, Explicit avg)"]

rename_map = {
    f"{language} Explicit (easy)": "Explicit (easy)",
    f"{language} Explicit (hard)": "Explicit (hard)",
    f"{language} Implicit": "Implicit",
    f"BLEU score ({language}, Explicit avg)": "Avg BLEU (Explicit)",
}
display = final[cols].rename(columns=rename_map)

# --- Badge helper ---
BADGE_CLS = {
    "NER-based": "badge-ner",
    "LLM-based": "badge-llm",
    "Perturbation": "badge-perturb",
    "Baseline": "badge-baseline",
}

def _badge(typ: str) -> str:
    """Return an HTML badge for a tool type; unknown types get the baseline style."""
    # NOTE(review): span markup reconstructed — original tags were stripped.
    cls = BADGE_CLS.get(typ, "badge-baseline")
    return f'<span class="badge {cls}">{typ}</span>'

# --- Risk heatmap color (green→yellow→red) ---
def _risk_color(val, lo=0, hi=100):
    """Return a CSS background for risk values: green(0) -> yellow(50) -> red(100).

    Non-numeric / missing values yield "" (no styling).  The value is
    clamped to [lo, hi] and linearly interpolated through two RGB ramps:
    green #4caf50 -> yellow #eab308 -> red #dc3545.
    """
    try:
        v = float(val)
    except (ValueError, TypeError):
        return ""
    t = max(0.0, min(1.0, (v - lo) / (hi - lo)))
    if t <= 0.5:  # green to yellow
        r = int(76 + (t / 0.5) * (234 - 76))
        g = int(175 + (t / 0.5) * (179 - 175))
        b = int(80 + (t / 0.5) * (8 - 80))
    else:  # yellow to red
        r = int(234 + ((t - 0.5) / 0.5) * (220 - 234))
        g = int(179 - ((t - 0.5) / 0.5) * (179 - 53))
        b = int(8 + ((t - 0.5) / 0.5) * (69 - 8))
    return f"background:rgba({r},{g},{b},0.22); font-weight:600;"

# --- BLEU heatmap color (red→yellow→green, higher=better) ---
def _bleu_color(val, lo=0.5, hi=1.0):
    """Return a CSS background for BLEU values: red(low) -> yellow(mid) -> green(high).

    Mirror image of ``_risk_color`` over the [0.5, 1.0] BLEU range.
    """
    try:
        v = float(val)
    except (ValueError, TypeError):
        return ""
    t = max(0.0, min(1.0, (v - lo) / (hi - lo)))
    if t <= 0.5:  # red to yellow
        r = int(220 + (t / 0.5) * (234 - 220))
        g = int(53 + (t / 0.5) * (179 - 53))
        b = int(69 + (t / 0.5) * (8 - 69))
    else:  # yellow to green
        r = int(234 - ((t - 0.5) / 0.5) * (234 - 76))
        g = int(179 - ((t - 0.5) / 0.5) * (179 - 175))
        b = int(8 + ((t - 0.5) / 0.5) * (80 - 8))
    return f"background:rgba({r},{g},{b},0.22); font-weight:600;"

# Risk value columns in the display table
risk_cols = {"Explicit (easy)", "Explicit (hard)", "Implicit", "Average Risk (Explicit)"}
bleu_col_name = "Avg BLEU (Explicit)"

# --- Build HTML table ---
# NOTE(review): <table>/<tr>/<td> markup reconstructed — original tags were stripped.
html_rows = []
for _, row in display.iterrows():
    is_baseline = str(row.get(tool_col, "")).strip().lower() == baseline_name.lower()
    tr_cls = ' class="baseline-row"' if is_baseline else ""
    cells = []
    for col in display.columns:
        val = row[col]
        if col == "Type":
            cells.append(f"<td>{_badge(str(val))}</td>")
        elif col in risk_cols and not is_baseline:
            # Heat-mapped risk cell, one decimal place; em-dash for missing.
            style = _risk_color(val)
            formatted = f"{val:.1f}" if pd.notna(val) else "—"
            cells.append(f'<td style="{style}">{formatted}</td>')
        elif col == bleu_col_name and not is_baseline:
            # Heat-mapped BLEU cell, two decimal places.
            style = _bleu_color(val)
            formatted = f"{val:.2f}" if pd.notna(val) else "—"
            cells.append(f'<td style="{style}">{formatted}</td>')
        elif col in risk_cols or col == bleu_col_name:
            # Baseline rows: numeric formatting without the heatmap styling.
            formatted = (
                f"{val:.2f}"
                if pd.notna(val) and col == bleu_col_name
                else (f"{val:.1f}" if pd.notna(val) else "—")
            )
            cells.append(f"<td>{formatted}</td>")
        else:
            cells.append(f"<td>{val}</td>")
    html_rows.append(f"<tr{tr_cls}>{''.join(cells)}</tr>")

header_cells = "".join(f"<th>{c}</th>" for c in display.columns)
table_html = (
    f'<table class="leaderboard">'
    f"<thead><tr>{header_cells}</tr></thead>"
    f"<tbody>{''.join(html_rows)}</tbody></table>"
)
st.markdown(table_html, unsafe_allow_html=True)

# ╔══════════════════════════════════════════════════════════════╗
# ║  4. Re-identification Risk Explanation + Overview Figure     ║
# ╚══════════════════════════════════════════════════════════════╝
st.markdown("<hr/>", unsafe_allow_html=True)
# NOTE(review): explanation markup reconstructed — original tags were stripped.
st.markdown("""
<div class="explain">
  <h2>How Re-identification Risk Is Computed</h2>
  <p>
    We measure how much identifying information survives anonymization.
    An LLM-based attacker reads the anonymized text and attempts to recover
    identifying attributes.
  </p>
  <h3>Direct Identifiers</h3>
  <p>
    If any direct identifier (e.g., full address, SSN) is recovered by the
    attacker, the re-identification risk is automatically set to 1.
  </p>
  <h3>Indirect Identifiers</h3>
  <p>
    Otherwise, risk is computed from the set of indirect identifiers
    recovered (state of residence, date of birth, marital status, …).
    The risk equals the probability that their combination uniquely
    identifies the individual in the population.
  </p>
</div>
""", unsafe_allow_html=True)

# Original overview figure, embedded as base64 like the logo above.
with open(Path(__file__).parent / "images" / "overview.png", "rb") as f:
    overview_b64 = b64encode(f.read()).decode("utf-8")
st.markdown(f"""
<img src="data:image/png;base64,{overview_b64}" alt="RAT-Bench overview figure" />
""", unsafe_allow_html=True)
st.markdown("""
<div class="fig-caption">
  Figure: Re-identification risk based on direct and indirect identifiers.
</div>
""", unsafe_allow_html=True)

# ╔══════════════════════════════════════════════════════════════╗
# ║  5. Interactive Risk vs BLEU Scatter (Altair)                ║
# ╚══════════════════════════════════════════════════════════════╝
st.markdown(
    '<div class="section-title">Re-identification Risk vs. BLEU Score</div>',
    unsafe_allow_html=True,
)
st.markdown(
    "<div class='caption'>"
    "A good anonymizer sits in the lower-right corner: low risk, high BLEU (text utility preserved). "
    "Hover over points for details.</div>",
    unsafe_allow_html=True,
)

# English-only scatter: drop rows without both metrics and exclude the baseline.
scatter_df = df.dropna(subset=["BLEU score (English, Explicit avg)", "English Avg"]).copy()
scatter_df = scatter_df[scatter_df[tool_col].str.strip().str.lower() != baseline_name.lower()]
scatter_df = scatter_df.rename(columns={
    "English Avg": "Average Risk",
    "BLEU score (English, Explicit avg)": "BLEU Score",
    tool_col: "Tool",
})

type_colors = alt.Scale(
    domain=["NER-based", "LLM-based", "Perturbation"],
    range=["#3b82f6", "#8b5cf6", "#f59e0b"],
)

points = (
    alt.Chart(scatter_df)
    .mark_circle(size=120, opacity=0.85, stroke="#fff", strokeWidth=1)
    .encode(
        x=alt.X(
            "BLEU Score:Q",
            scale=alt.Scale(domain=[0.5, 1.0]),
            title="BLEU Score (higher = more utility)",
        ),
        y=alt.Y(
            "Average Risk:Q",
            scale=alt.Scale(domain=[20, 100]),
            title="Average Risk % (lower = safer)",
        ),
        color=alt.Color(
            "Type:N",
            scale=type_colors,
            legend=alt.Legend(title="Type", orient="bottom"),
        ),
        tooltip=[
            "Tool:N",
            "Type:N",
            alt.Tooltip("Average Risk:Q", format=".1f"),
            alt.Tooltip("BLEU Score:Q", format=".2f"),
        ],
    )
)

# Text labels next to each point, colored to match the point.
labels = (
    alt.Chart(scatter_df)
    .mark_text(align="left", dx=8, dy=-6, fontSize=11, fontWeight=500)
    .encode(
        x="BLEU Score:Q",
        y="Average Risk:Q",
        text="Tool:N",
        color=alt.Color("Type:N", scale=type_colors, legend=None),
    )
)

chart = (
    (points + labels)
    .properties(width=500, height=380)
    .configure_axis(
        grid=True,
        gridColor="rgba(128,128,128,0.12)",
        labelFontSize=12,
        titleFontSize=13,
        titleFontWeight=600,
    )
    .configure_view(strokeWidth=0)
    .interactive()
)
st.altair_chart(chart, use_container_width=True)

# ╔══════════════════════════════════════════════════════════════╗
# ║  6. BibTeX Citation                                          ║
# ╚══════════════════════════════════════════════════════════════╝
st.markdown("<hr/>", unsafe_allow_html=True)
st.markdown('<div class="section-title">BibTeX</div>', unsafe_allow_html=True)
st.markdown("If you found this useful for your work, please cite:")
# BUGFIX: raw string required — in a non-raw string "\v" is a vertical-tab
# escape, which corrupted the rendered BibTeX ("Kr{\v{c}}o", "Nata{\v{s}}a").
st.code(r"""@article{krvco2026rat,
  title={RAT-Bench: A Comprehensive Benchmark for Text Anonymization},
  author={Kr{\v{c}}o, Nata{\v{s}}a and Yao, Zexi and Meeus, Matthieu and de Montjoye, Yves-Alexandre},
  journal={arXiv preprint arXiv:2602.12806},
  year={2026}
}""", language="bibtex")