# jbcs2025_experiments_report / runs / api_models / compute_bootstrap_ci.py
# Last commit: "update parquet tables and fix typo" by abarbosa (ec168b6)
import argparse
import json
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
import yaml
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
from metrics import compute_metrics
def extract_config_from_log(log_path: Path) -> "DictConfig":
    """
    Parse the experiment configuration from a run_experiment.log file.

    The log begins with one timestamped line (timestamped lines start with
    '['), followed by the configuration dumped as YAML, followed by further
    timestamped log lines.  Everything between the first non-bracketed,
    non-empty line and the next bracketed line is treated as the YAML block.

    Parameters:
        log_path: Path to the run_experiment.log file.

    Returns:
        The parsed configuration as an OmegaConf DictConfig (note: not a
        plain dict — the original annotation said Dict but the value has
        always been a DictConfig).
    """
    with open(log_path, 'r', encoding="latin1") as f:
        lines = f.readlines()
    yaml_lines = []
    in_yaml = False
    for line in lines:
        # YAML starts at the first non-empty line that is not a timestamped
        # log line.
        if not in_yaml and line.strip() and not line.startswith('['):
            in_yaml = True
        # YAML ends when the next timestamped log line appears.
        if in_yaml and line.startswith('['):
            break
        if in_yaml:
            yaml_lines.append(line)
    # Parse the collected YAML content and wrap it for attribute-style access.
    config_dict = yaml.safe_load(''.join(yaml_lines))
    return OmegaConf.create(config_dict)
def load_inference_results_by_grader(jsonl_path: Path) -> Tuple[Dict[Tuple, Dict], Dict[Tuple, Dict]]:
    """
    Load predictions and labels from the inference results JSONL file, organized by grader.

    Each essay is keyed by the tuple (id, id_prompt, essay_text), so the same
    essay scored by both graders maps to the same key in both dictionaries.

    Parameters:
        jsonl_path: Path to the *_inference_results.jsonl file.  When the file
            name contains an API model name (gpt/sabia/deepseek) predictions
            are read from the 'pontuacao' field, otherwise from 'prediction'.

    Returns:
        grader_a_data: Dict mapping essay key to {'prediction': score, 'label': label}
        grader_b_data: Dict mapping essay key to {'prediction': score, 'label': label}
    """
    grader_a_data = {}
    grader_b_data = {}
    # The prediction field depends only on the file name, so resolve it once
    # up front instead of re-checking it for every line of the file.
    model_types = ["gpt", "sabia", "deepseek"]
    prediction_field = "pontuacao" if any(model in jsonl_path.name for model in model_types) else "prediction"
    with open(jsonl_path, 'r', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line.strip())
            essay_id = (data['id'], data['id_prompt'], data['essay_text'])
            essay_data = {
                'prediction': data[prediction_field],
                'label': data['label']
            }
            if data['reference'] == 'grader_a':
                grader_a_data[essay_id] = essay_data
            elif data['reference'] == 'grader_b':
                grader_b_data[essay_id] = essay_data
    assert len(grader_a_data) == len(grader_b_data), "Mismatch in number of essays graded by A and B"
    return grader_a_data, grader_b_data
def compute_bootstrap_confidence_intervals_two_graders(
    grader_a_data: Dict[str, Dict],
    grader_b_data: Dict[str, Dict],
    metrics_to_compute: List[str],
    cfg: DictConfig,
    n_bootstrap: int = 1000,
    confidence_level: float = 0.95,
    random_state: int = 42,
) -> Dict[str, Tuple[float, float, float]]:
    """
    Compute bootstrap confidence intervals for specified metrics using two-grader structure.

    For each bootstrap sample:
    1. Sample essay IDs with replacement
    2. Gather both grader A and grader B predictions/labels for the sample
    3. Compute metrics separately for grader A and grader B
    4. Take the mean of the two grader metrics

    Parameters:
        grader_a_data: Dictionary mapping essay_id to prediction/label for grader A
        grader_b_data: Dictionary mapping essay_id to prediction/label for grader B
        metrics_to_compute: List of metric names to compute CIs for
        cfg: Configuration object passed through to compute_metrics
        n_bootstrap: Number of bootstrap samples
        confidence_level: Confidence level (default 0.95 for 95% CI)
        random_state: Random seed for reproducibility (None leaves the global
            NumPy RNG state untouched)

    Returns:
        Dictionary mapping metric names to (mean, lower_bound, upper_bound)
    """
    if random_state is not None:
        np.random.seed(random_state)
    # Both graders must cover exactly the same essays.
    essay_ids = list(grader_a_data.keys())
    assert set(essay_ids) == set(grader_b_data.keys()), "Essay IDs don't match between graders"
    n_essays = len(essay_ids)
    # Hoist the per-grader arrays out of the bootstrap loop: each replicate
    # then reduces to a NumPy fancy-index gather instead of a Python-level
    # loop over dictionaries (same values, same order, same RNG stream).
    preds_a = np.array([grader_a_data[eid]['prediction'] for eid in essay_ids])
    labels_a = np.array([grader_a_data[eid]['label'] for eid in essay_ids])
    preds_b = np.array([grader_b_data[eid]['prediction'] for eid in essay_ids])
    labels_b = np.array([grader_b_data[eid]['label'] for eid in essay_ids])
    bootstrap_metrics = {metric: [] for metric in metrics_to_compute}
    for _ in tqdm(range(n_bootstrap), desc="Performing Bootstrap samples"):
        # Sample essay indices with replacement; the same resample is used
        # for both graders so their metrics stay paired.
        sampled_indices = np.random.choice(n_essays, size=n_essays, replace=True)
        metrics_a = compute_metrics((preds_a[sampled_indices], labels_a[sampled_indices]), cfg)
        metrics_b = compute_metrics((preds_b[sampled_indices], labels_b[sampled_indices]), cfg)
        # Record the mean of the two graders' metrics for this replicate.
        for metric in metrics_to_compute:
            if metric in metrics_a and metric in metrics_b:
                bootstrap_metrics[metric].append((metrics_a[metric] + metrics_b[metric]) / 2)
    # Percentile (empirical) confidence interval bounds.
    alpha = 1 - confidence_level
    lower_percentile = (alpha / 2) * 100
    upper_percentile = (1 - alpha / 2) * 100
    ci_results = {}
    for metric, values in bootstrap_metrics.items():
        if values:  # Skip metrics that compute_metrics never produced
            values_array = np.array(values)
            ci_results[metric] = (
                np.mean(values_array),
                np.percentile(values_array, lower_percentile),
                np.percentile(values_array, upper_percentile),
            )
    return ci_results
def save_results_to_csv(
    experiment_id: str,
    ci_results: Dict[str, Tuple[float, float, float]],
    output_path: Path,
    confidence_level: float = 0.95,
) -> None:
    """
    Save bootstrap CI results to a one-row CSV file with flattened columns.

    Parameters:
        experiment_id: Identifier written to the 'experiment_id' column.
        ci_results: Mapping from metric name to (mean, lower, upper).
        output_path: Destination CSV path (overwritten if it exists).
        confidence_level: Confidence level the intervals were computed at;
            only affects the '<metric>_lower_XXci' / '<metric>_upper_XXci'
            column names.  The default 0.95 yields '95ci', matching the
            previously hard-coded column names.
    """
    from datetime import datetime
    ci_pct = f"{confidence_level * 100:.0f}"
    # Single flat row: one experiment, metrics spread across columns.
    row_data = {
        'experiment_id': experiment_id,
        'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    }
    for metric, (mean_val, lower, upper) in ci_results.items():
        row_data[f'{metric}_mean'] = mean_val
        row_data[f'{metric}_lower_{ci_pct}ci'] = lower
        row_data[f'{metric}_upper_{ci_pct}ci'] = upper
        row_data[f'{metric}_ci_width'] = upper - lower
    df = pd.DataFrame([row_data])
    df.to_csv(output_path, index=False)
    print(f"Results saved to {output_path}")
def main():
    """
    CLI entry point: compute bootstrap CIs for one experiment directory.

    # Basic usage
    python compute_bootstrap_ci.py /path/to/experiment/directory

    # With custom parameters
    python compute_bootstrap_ci.py /path/to/experiment/directory \
        --metrics QWK accuracy RMSE HDIV \
        --n-bootstrap 2000 \
        --confidence-level 0.99
    """
    parser = argparse.ArgumentParser(
        description='Compute bootstrap confidence intervals for API model inference results with two-grader structure'
    )
    parser.add_argument(
        'experiment_dir',
        type=str,
        help='Path to the experiment directory containing log and results files'
    )
    parser.add_argument(
        '--metrics',
        nargs='+',
        default=['QWK', 'Macro_F1', 'Weighted_F1'],
        help='Metrics to compute confidence intervals for (default: QWK Macro_F1 Weighted_F1)'
    )
    parser.add_argument(
        '--n-bootstrap',
        type=int,
        default=1000,
        help='Number of bootstrap samples (default: 1000)'
    )
    parser.add_argument(
        '--confidence-level',
        type=float,
        default=0.95,
        help='Confidence level for intervals (default: 0.95)'
    )
    parser.add_argument(
        '--seed',
        type=int,
        default=42,
        help='Random seed for reproducibility (default: 42)'
    )
    args = parser.parse_args()
    exp_dir = Path(args.experiment_dir)
    if not exp_dir.exists():
        raise FileNotFoundError(f"Experiment directory not found: {exp_dir}")
    # Locate the run log (carries the embedded YAML config).
    log_files = list(exp_dir.glob("*run_inference_experiment.log"))
    if not log_files:
        raise FileNotFoundError(f"No log file found in {exp_dir}")
    log_path = log_files[0]
    # Locate the inference results JSONL.
    results_files = list(exp_dir.glob("*_inference_results.jsonl"))
    if not results_files:
        raise FileNotFoundError(f"No inference results file found in {exp_dir}")
    results_path = results_files[0]
    # The directory name doubles as the experiment identifier.
    experiment_id = exp_dir.name
    print(f"Processing experiment: {experiment_id}")
    print(f"Log file: {log_path}")
    print(f"Results file: {results_path}")
    try:
        # extract_config_from_log already returns an OmegaConf DictConfig,
        # so no extra OmegaConf.create wrapping is needed here.
        cfg = extract_config_from_log(log_path)
        # Prefer the seed recorded in the experiment config; fall back to the
        # CLI --seed when training_params (or its seed) is absent.
        seed = cfg.experiments.get('training_params', {}).get('seed', args.seed)
    except Exception as e:
        raise RuntimeError(f"Failed to extract configuration from log file: {e}") from e
    # Load predictions and labels split by grader.
    grader_a_data, grader_b_data = load_inference_results_by_grader(results_path)
    print(f"Loaded {len(grader_a_data)} essays with data from both graders")
    # Bootstrap CIs over the mean of the two graders' metrics.
    ci_results = compute_bootstrap_confidence_intervals_two_graders(
        grader_a_data=grader_a_data,
        grader_b_data=grader_b_data,
        metrics_to_compute=args.metrics,
        cfg=cfg,
        n_bootstrap=args.n_bootstrap,
        confidence_level=args.confidence_level,
        random_state=seed,
    )
    # Also compute metrics on the full dataset (no bootstrap) for reference.
    all_predictions_a = np.array([data['prediction'] for data in grader_a_data.values()])
    all_labels_a = np.array([data['label'] for data in grader_a_data.values()])
    all_predictions_b = np.array([data['prediction'] for data in grader_b_data.values()])
    all_labels_b = np.array([data['label'] for data in grader_b_data.values()])
    metrics_full_a = compute_metrics((all_predictions_a, all_labels_a), cfg)
    metrics_full_b = compute_metrics((all_predictions_b, all_labels_b), cfg)
    print("\nFull Dataset Metrics:")
    print("  Grader A:")
    for metric in args.metrics:
        if metric in metrics_full_a:
            print(f"    {metric}: {metrics_full_a[metric]:.4f}")
    print("  Grader B:")
    for metric in args.metrics:
        if metric in metrics_full_b:
            print(f"    {metric}: {metrics_full_b[metric]:.4f}")
    print("  Mean (A+B)/2:")
    for metric in args.metrics:
        if metric in metrics_full_a and metric in metrics_full_b:
            mean_val = (metrics_full_a[metric] + metrics_full_b[metric]) / 2
            print(f"    {metric}: {mean_val:.4f}")
    print(f"\nBootstrap Confidence Intervals ({args.confidence_level*100:.0f}%):")
    print("  (Based on mean of grader A and B metrics)")
    for metric, (mean_val, lower, upper) in ci_results.items():
        print(f"  {metric}: {mean_val:.4f} [{lower:.4f}, {upper:.4f}]")
    # Persist the CIs next to the experiment artifacts.
    output_path = exp_dir / "bootstrap_confidence_intervals.csv"
    save_results_to_csv(experiment_id, ci_results, output_path)
# Allow the module to be executed directly as a CLI script.
if __name__ == "__main__":
    main()