# gumble-max-vllm-experiment / adam_analyze_three_class.py
# Uploaded by royrin via huggingface_hub (commit d16e9ef, verified)
#!/usr/bin/env python
"""
Three-class token labeling analysis: good (0), suspicious (1), dangerous (2).
Classification logic:
- If score >= threshold: good (0)
- If score < threshold AND rank <= 4: suspicious (1)
- If score < threshold AND rank > 4: dangerous (2)
Generates ROC-style curves where:
- FPR = % of actually sampled tokens labeled as dangerous (2)
- Extractable info = log_2(n_good) + n_suspicious * log_2(4) + n_dangerous * log_2(V)
"""
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import argparse
from datetime import datetime
def normalize_score(score: float, min_score: int = -20) -> float:
    """Clamp a Gumbel score into a finite range.

    Infinite values (either sign) and anything below ``min_score`` are
    mapped to ``min_score`` so downstream arithmetic never sees ``inf``.
    """
    if np.isinf(score) or score < min_score:
        return min_score
    return score
def get_vocab_size_from_path(folder_path):
    """Infer the tokenizer vocab size from the model name embedded in a path.

    Matches case-insensitively on 'llama' or 'qwen'; falls back to 250,000
    when neither substring is present.
    """
    path = folder_path.lower()
    if 'llama' in path:
        return 128256
    if 'qwen' in path:
        return 151646
    return 250000
def save_plot(filepath_without_ext, dpi=150, bbox_inches='tight'):
    """Write the current matplotlib figure to both PNG and PDF files."""
    for ext in ("png", "pdf"):
        plt.savefig(f"{filepath_without_ext}.{ext}", dpi=dpi, bbox_inches=bbox_inches)
def precompute_ranks(scores_K):
    """Rank every top-K token by its Gumbel score (lower score = better).

    Args:
        scores_K: Array of Gumbel scores for top-K tokens [K]

    Returns:
        Array [K] where entry i is the number of scores strictly smaller
        than scores_K[i] (i.e. the count of better tokens).
    """
    # Broadcast to a [K, K] matrix: entry (i, j) is True when token j
    # scores strictly lower (better) than token i; summing rows gives ranks.
    better_than = scores_K[None, :] < scores_K[:, None]
    return better_than.sum(axis=1)
def classify_tokens_with_ranks(scores_K, ranks, threshold, rank_threshold=4):
    """Label tokens as good (0), suspicious (1), or dangerous (2).

    A token that meets the score threshold is good.  A failing token is
    suspicious when its precomputed rank is within ``rank_threshold``,
    and dangerous otherwise.

    Args:
        scores_K: Array of Gumbel scores for top-K tokens [K]
        ranks: Precomputed ranks for each token [K]
        threshold: Score threshold
        rank_threshold: Rank threshold (default: 4)

    Returns:
        labels: Array of labels (0, 1, or 2) for each token
    """
    failed = scores_K < threshold
    low_rank = ranks <= rank_threshold
    labels = np.zeros(len(scores_K), dtype=int)
    labels[failed & ~low_rank] = 2  # dangerous: failed and ranked poorly
    labels[failed & low_rank] = 1   # suspicious: failed but highly ranked
    # Tokens with score >= threshold keep the default good label (0).
    return labels
def compute_three_class_metrics(
    sampled_scores,
    sampled_ranks,
    top_k_gumbel_scores,
    top_k_ranks,
    threshold,
    vocab_size,
    rank_threshold=4
):
    """
    Compute FPR and extractable information for three-class labeling.

    Args:
        sampled_scores: Scores for actually sampled tokens [N]
        sampled_ranks: Precomputed ranks for sampled tokens [N]
        top_k_gumbel_scores: Scores for all top-K tokens at each position [N, K]
        top_k_ranks: Precomputed ranks for all top-K tokens [N, K]
        threshold: Score threshold
        vocab_size: Vocabulary size for dangerous token cost
        rank_threshold: Rank threshold for dangerous classification

    Returns:
        fpr: False positive rate (% of sampled tokens labeled as dangerous)
        extractable_info: Average extractable information per position,
            as a percentage of log2(vocab_size)
    """
    # Classify sampled tokens with the same helper used for the top-K
    # tokens below (the original duplicated the labeling logic inline,
    # which risked the two paths drifting apart).
    sampled_labels = classify_tokens_with_ranks(
        sampled_scores, sampled_ranks, threshold, rank_threshold)

    # FPR: % of sampled tokens labeled as dangerous (2)
    n_sampled_dangerous = (sampled_labels == 2).sum()
    fpr = (n_sampled_dangerous / len(sampled_scores)) * 100

    # Extractable information: log2(n_good) + n_suspicious*log2(4)
    # + n_dangerous*log2(V) per position (see module docstring).
    log2_4 = np.log2(4)
    log2_V = np.log2(vocab_size)
    total_bits = 0
    for i, scores_K in enumerate(top_k_gumbel_scores):
        # Classify all top-K tokens at this position using precomputed ranks
        labels = classify_tokens_with_ranks(scores_K, top_k_ranks[i], threshold, rank_threshold)
        n_good = (labels == 0).sum()
        n_suspicious = (labels == 1).sum()
        n_dangerous = (labels == 2).sum()
        # Compute bits for this position
        position_bits = 0
        if n_good > 0:
            position_bits += np.log2(n_good)
        if n_suspicious > 0:
            position_bits += n_suspicious * log2_4
        if n_dangerous > 0:
            position_bits += n_dangerous * log2_V
        total_bits += position_bits

    # Average across positions and normalize to percentage of max possible
    max_possible_bits = log2_V
    avg_bits_pct = (total_bits / len(top_k_gumbel_scores)) / max_possible_bits * 100
    return fpr, avg_bits_pct
def main():
    """CLI entry point: sweep score thresholds per sigma and plot FPR vs
    extractable information for the three-class labeling scheme."""
    parser = argparse.ArgumentParser(description="Three-class token labeling analysis")
    parser.add_argument(
        "--folder",
        type=str,
        required=True,
        help="Path to results folder (e.g., gumbel_cgs_analysis_results/20251014_174336)",
    )
    parser.add_argument(
        "--max-thresholds",
        type=int,
        default=500,
        help="Maximum number of thresholds to evaluate (default: 500)",
    )
    parser.add_argument(
        "--rank-threshold",
        type=int,
        default=4,
        help="Rank threshold for dangerous classification (default: 4)",
    )
    args = parser.parse_args()

    folder = args.folder
    filename = f"{folder}/all_prompts.pkl"
    datestring = datetime.now().strftime("%Y%m%d_%H%M%S")

    # Determine vocab size from the model name embedded in the folder path
    vocab_size = get_vocab_size_from_path(folder)
    print(f"Using vocab size: {vocab_size:,} (detected from folder path: {folder})")
    print(f"Using rank threshold: {args.rank_threshold}")

    # Load data. NOTE: trusted pickle produced by our own pipeline; never
    # point --folder at untrusted data. Context manager fixes the original's
    # leaked file handle (pickle.load(open(...))).
    print("\nLoading data...")
    with open(filename, "rb") as fh:
        data = pickle.load(fh)

    # Extract per-position score records
    sampled_gumbel_scores = []
    top_k_gumbel_scores = []
    for result in tqdm(data, desc="Extracting scores"):
        sampled_gumbel_scores.append(result["sampled_gumbel_scores"])
        top_k_gumbel_scores.append(result["top_k_gumbel_scores"])

    sigmas = list(sampled_gumbel_scores[0].keys())
    print(f"Sigmas: {sigmas}")

    images_dir = f"{folder}/images"
    os.makedirs(images_dir, exist_ok=True)

    print("\n" + "="*80)
    print("COMPUTING THREE-CLASS ROC-STYLE CURVES")
    print("="*80)

    results_by_sigma = {}
    for sigma in sigmas:
        print(f"\nProcessing sigma={sigma}...")
        # Convert scores (negate to match threshold logic: lower is better)
        sampled_scores = np.array([normalize_score(score[sigma]) * -1
                                   for score in sampled_gumbel_scores])
        topk_scores = [scores[sigma] * -1 for scores in top_k_gumbel_scores]

        # OPTIMIZATION: Precompute ranks once for all positions
        print(" Precomputing ranks for all positions...")
        sampled_ranks = []
        topk_ranks = []
        for sampled_score, scores_K in tqdm(zip(sampled_scores, topk_scores),
                                            total=len(sampled_scores),
                                            desc=" Computing ranks"):
            topk_ranks.append(precompute_ranks(scores_K))
            # Rank of the sampled token = count of strictly better top-K scores
            sampled_ranks.append((scores_K < sampled_score).sum())
        sampled_ranks = np.array(sampled_ranks)
        print(f" ✓ Precomputed ranks for {len(sampled_scores)} positions")

        # Candidate thresholds are the unique finite sampled scores
        unique_thresholds = np.unique(sampled_scores)
        unique_thresholds = unique_thresholds[np.isfinite(unique_thresholds)]
        print(f" Found {len(unique_thresholds)} unique threshold values")

        # Subsample evenly if there are too many candidates
        if len(unique_thresholds) > args.max_thresholds:
            print(f" Subsampling to {args.max_thresholds} thresholds")
            indices = np.linspace(0, len(unique_thresholds)-1, args.max_thresholds, dtype=int)
            unique_thresholds = unique_thresholds[indices]

        results = []
        # Evaluate each threshold (fast thanks to precomputed ranks)
        print(" Evaluating thresholds...")
        for threshold in tqdm(unique_thresholds, desc=f" Sigma={sigma}"):
            fpr, extractable_info = compute_three_class_metrics(
                sampled_scores,
                sampled_ranks,
                topk_scores,
                topk_ranks,
                threshold,
                vocab_size,
                args.rank_threshold
            )
            results.append({
                'threshold': threshold,
                'fpr': fpr,
                'extractable_info': extractable_info
            })
        results_by_sigma[sigma] = results
        print(f" ✓ Evaluated {len(results)} thresholds for sigma={sigma}")

    # Persist raw results for later re-plotting
    output_file = f"{folder}/three_class_results.pkl"
    with open(output_file, 'wb') as f:
        pickle.dump(results_by_sigma, f)
    print(f"\n✓ Saved three-class results to {output_file}")

    # Plot ROC-style curves (one line per sigma, log-log axes)
    print("\nCreating three-class ROC-style plot...")
    plt.figure(figsize=(12, 8))
    for sigma in sigmas:
        results = results_by_sigma[sigma]
        # Sort by FPR so the curve is drawn left-to-right
        results_sorted = sorted(results, key=lambda x: x['fpr'])
        fpr_values = [r['fpr'] for r in results_sorted]
        info_values = [r['extractable_info'] for r in results_sorted]
        plt.plot(fpr_values, info_values, marker='o', markersize=2,
                 label=f"sigma={sigma}", alpha=0.7, linewidth=2)
    plt.xlabel("False Positive Rate (%) - Sampled tokens labeled as dangerous", fontsize=16)
    plt.ylabel("Extractable Information (%)", fontsize=16)
    plt.xscale("log")
    plt.yscale("log")
    plt.title(f"Three-Class Labeling: FPR vs Extractable Information (rank_threshold={args.rank_threshold})",
              fontsize=18, fontweight='bold')
    plt.tick_params(axis='both', which='major', labelsize=14)
    plt.legend(fontsize=14, loc='best')
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    save_plot(f"{images_dir}/three_class_roc_curve_{datestring}")
    plt.close()
    print(f" ✓ Saved three-class ROC plot to {images_dir}/three_class_roc_curve_{datestring}.png/.pdf")

    print("\n" + "="*80)
    print("THREE-CLASS ANALYSIS COMPLETE!")
    print("="*80)