|
|
""" |
|
|
Debug script to understand threshold computation. |
|
|
|
|
|
Shows: |
|
|
1. Distribution of sampled GLS scores |
|
|
2. What percentile values are computed |
|
|
3. What threshold values correspond to each percentile |
|
|
""" |
|
|
|
|
|
import pickle |
|
|
import numpy as np |
|
|
import matplotlib.pyplot as plt |
|
|
from pathlib import Path |
|
|
from tqdm import tqdm |
|
|
|
|
|
def analyze_threshold_computation(results_dir):
    """Analyze threshold computation from sampled scores.

    Pools the ``sampled_gumbel_scores`` arrays stored in every
    ``prompt_*.pkl`` file under *results_dir*, prints summary statistics,
    builds a dense percentile -> threshold mapping, verifies that the
    empirical false-positive rate (FPR) at each threshold matches its
    percentile, and writes a 4-panel diagnostic PDF next to *results_dir*.

    Args:
        results_dir: Path (or string) of a directory containing
            ``prompt_*.pkl`` result files.

    Raises:
        ValueError: If no sampled scores could be collected.
    """
    results_dir = Path(results_dir)
    sampled_scores = _collect_sampled_scores(results_dir)
    # Fail loudly on an empty collection instead of letting .min() raise
    # an opaque numpy error.
    if sampled_scores.size == 0:
        raise ValueError(f"No sampled scores found in {results_dir}")

    print(f"\nCollected {len(sampled_scores)} sampled scores")
    print(f"Score range: [{sampled_scores.min():.3f}, {sampled_scores.max():.3f}]")
    print(f"Score mean: {sampled_scores.mean():.3f}")
    print(f"Score median: {np.median(sampled_scores):.3f}")
    print(f"Score std: {sampled_scores.std():.3f}")

    percentiles = _percentile_grid()
    thresholds = np.percentile(sampled_scores, percentiles)

    print(f"\nComputed {len(thresholds)} thresholds")
    print(f"Threshold range: [{thresholds.min():.3f}, {thresholds.max():.3f}]")

    print("\nExamples of percentile -> threshold mapping:")
    for p in [0, 0.1, 0.5, 1, 5, 10, 50, 90, 95, 99, 99.5, 99.9, 100]:
        # Use the nearest grid point: several example values (0.1, 0.5, 90,
        # 95, 99, 99.5, 99.9) do not land exactly on the linspace grid, so
        # an exact membership test would silently skip them.
        idx = int(np.argmin(np.abs(percentiles - p)))
        print(f"  {percentiles[idx]:6.1f}th percentile -> threshold = {thresholds[idx]:8.3f}")

    # Empirical FPR: fraction of sampled scores strictly below each threshold.
    # searchsorted(side="left") on the sorted scores counts elements < t,
    # matching np.mean(sampled_scores < t) per threshold, but in
    # O(n log n) total instead of O(n * len(thresholds)).
    sorted_scores = np.sort(sampled_scores)
    fprs = (np.searchsorted(sorted_scores, thresholds, side="left")
            / len(sampled_scores) * 100.0)

    output_file = results_dir.parent / f"debug_thresholds_{results_dir.name}.pdf"
    _plot_diagnostics(sampled_scores, percentiles, thresholds, fprs, output_file)
    print(f"\nSaved plot to {output_file}")

    print("\nChecking FPR vs Percentile match:")
    print("(Should be identical if logic is correct)")
    for p in [0, 1, 5, 10, 50, 90, 95, 99, 100]:
        # Nearest-grid lookup for the same reason as above (90/95/99 are
        # not exact grid points).
        idx = int(np.argmin(np.abs(percentiles - p)))
        t = thresholds[idx]
        actual_fpr = fprs[idx]
        print(f"  {p:6.1f}th percentile: threshold={t:8.3f}, expected FPR={p:6.2f}%, actual FPR={actual_fpr:6.2f}%")


def _collect_sampled_scores(results_dir):
    """Pool 'sampled_gumbel_scores' from every prompt_*.pkl in results_dir."""
    pkl_files = sorted(results_dir.glob("prompt_*.pkl"))
    print(f"Found {len(pkl_files)} pickle files")

    scores = []
    for pkl_file in tqdm(pkl_files, desc="Collecting sampled scores"):
        with open(pkl_file, "rb") as f:
            result = pickle.load(f)
        # Result files without the sampled-score field are skipped.
        if "sampled_gumbel_scores" in result:
            scores.extend(result["sampled_gumbel_scores"])
    return np.asarray(scores)


def _percentile_grid():
    """Non-uniform percentile grid with extra resolution in the tails."""
    return np.unique(np.concatenate([
        np.linspace(0, 1, 100),
        np.linspace(1, 10, 100),
        np.linspace(10, 50, 200),
        np.linspace(50, 100, 600),
    ]))


def _plot_diagnostics(sampled_scores, percentiles, thresholds, fprs, output_file):
    """Draw the 4-panel diagnostic figure and save it to output_file."""
    fig, axes = plt.subplots(2, 2, figsize=(14, 10))

    # Panel 1: histogram of the pooled score distribution (log counts).
    ax = axes[0, 0]
    ax.hist(sampled_scores, bins=100, edgecolor='black', alpha=0.7)
    ax.set_xlabel('Sampled GLS Score', fontsize=12)
    ax.set_ylabel('Count (log scale)', fontsize=12)
    ax.set_yscale('log')
    ax.set_title('Distribution of Sampled GLS Scores', fontsize=14, fontweight='bold')
    ax.axvline(np.median(sampled_scores), color='red', linestyle='--',
               linewidth=2, label=f'Median = {np.median(sampled_scores):.3f}')
    ax.axvline(sampled_scores.mean(), color='orange', linestyle='--',
               linewidth=2, label=f'Mean = {sampled_scores.mean():.3f}')
    ax.legend()
    ax.grid(alpha=0.3)

    # Panel 2: full percentile -> threshold curve.
    ax = axes[0, 1]
    ax.plot(percentiles, thresholds, linewidth=2)
    ax.set_xlabel('Percentile (%)', fontsize=12)
    ax.set_ylabel('Threshold Value', fontsize=12)
    ax.set_title('Threshold vs Percentile', fontsize=14, fontweight='bold')
    ax.grid(alpha=0.3)

    # Panel 3: zoom on the low-percentile tail (0-10%).
    ax = axes[1, 0]
    mask = percentiles <= 10
    ax.plot(percentiles[mask], thresholds[mask], linewidth=2, marker='o', markersize=3)
    ax.set_xlabel('Percentile (%)', fontsize=12)
    ax.set_ylabel('Threshold Value', fontsize=12)
    ax.set_title('Threshold vs Percentile (Zoom: 0-10%)', fontsize=14, fontweight='bold')
    ax.grid(alpha=0.3)

    # Panel 4: empirical FPR vs the identity line it should follow.
    ax = axes[1, 1]
    ax.plot(percentiles, fprs, linewidth=2, label='Actual FPR')
    ax.plot(percentiles, percentiles, linewidth=2, linestyle='--',
            color='red', alpha=0.5, label='Expected FPR = Percentile')
    ax.set_xlabel('Percentile (%)', fontsize=12)
    ax.set_ylabel('FPR (%)', fontsize=12)
    ax.set_title('Expected vs Actual FPR', fontsize=14, fontweight='bold')
    ax.legend()
    ax.grid(alpha=0.3)

    plt.tight_layout()
    plt.savefig(output_file, dpi=150, bbox_inches='tight')
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    import sys

    # Guard clause: exactly one positional argument (the results directory)
    # is required; anything less prints usage and exits non-zero.
    cli_args = sys.argv[1:]
    if not cli_args:
        print("Usage: python debug_threshold_computation.py <results_dir>")
        print("Example: python debug_threshold_computation.py gumbel_cgs_analysis_results/sweep_20251012_211529/sigma0.01_n5000")
        sys.exit(1)

    analyze_threshold_computation(cli_args[0])
|
|
|