File size: 12,418 Bytes
75c6626
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ec168b6
75c6626
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1626023
75c6626
1626023
75c6626
 
1626023
 
75c6626
1626023
 
75c6626
 
 
 
1626023
ec168b6
 
 
 
1626023
ec168b6
1626023
 
 
 
 
 
 
 
 
75c6626
 
1626023
 
 
75c6626
 
 
 
 
 
 
1626023
 
 
 
 
 
 
75c6626
 
1626023
 
75c6626
 
 
 
 
 
 
 
 
 
 
 
1626023
 
 
 
 
75c6626
 
 
 
1626023
 
 
 
 
 
 
 
75c6626
1626023
 
 
 
 
 
75c6626
1626023
 
 
 
 
 
 
 
 
 
 
75c6626
1626023
 
 
75c6626
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1626023
75c6626
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1626023
 
 
75c6626
1626023
 
 
 
75c6626
 
 
 
 
 
 
1626023
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75c6626
 
 
 
1626023
75c6626
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
import argparse
import json
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pandas as pd
import yaml
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
from metrics import compute_metrics


def extract_config_from_log(log_path: Path) -> Dict:
    """
    Recover the experiment configuration embedded in a run log.

    The log begins with a timestamped line (prefixed with '['), followed by
    a YAML dump of the configuration, followed by further timestamped log
    lines. Everything between the first non-timestamp line and the next
    timestamp line is treated as the YAML payload.

    Parameters:
        log_path: Path to the run_experiment.log file.

    Returns:
        The parsed configuration as an OmegaConf DictConfig.
    """
    with open(log_path, 'r', encoding="latin1") as handle:
        all_lines = handle.readlines()

    collected = []
    capturing = False

    for raw in all_lines:
        if not capturing:
            # The YAML block starts at the first non-empty line that is not
            # a '[timestamp]' log line.
            if raw.strip() and not raw.startswith('['):
                capturing = True

        if capturing:
            # A new timestamped line marks the end of the YAML block.
            if raw.startswith('['):
                break
            collected.append(raw)

    config_dict = yaml.safe_load(''.join(collected))

    # Hand back an OmegaConf DictConfig for downstream attribute access.
    return OmegaConf.create(config_dict)


def load_inference_results_by_grader(jsonl_path: Path) -> Tuple[Dict[Tuple, Dict], Dict[Tuple, Dict]]:
    """
    Load predictions and labels from the inference results JSONL file, organized by grader.

    Each JSONL record must carry 'id', 'id_prompt', 'essay_text', 'label',
    'reference' ('grader_a' or 'grader_b'), and a prediction field whose name
    depends on the producing model: files from API models (name contains
    "gpt", "sabia", or "deepseek") store it under 'pontuacao', all others
    under 'prediction'.

    Parameters:
        jsonl_path: Path to the *_inference_results.jsonl file.

    Returns:
        grader_a_data: Maps (id, id_prompt, essay_text) to
            {'prediction': score, 'label': label} for grader A.
        grader_b_data: Same structure for grader B.

    Raises:
        ValueError: If the two graders cover a different number of essays.
    """
    grader_a_data: Dict[Tuple, Dict] = {}
    grader_b_data: Dict[Tuple, Dict] = {}

    # The prediction field depends only on the file name, so resolve it once
    # up front instead of re-evaluating it for every record in the loop.
    model_types = ("gpt", "sabia", "deepseek")
    prediction_field = "pontuacao" if any(model in jsonl_path.name for model in model_types) else "prediction"

    with open(jsonl_path, 'r', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line.strip())
            # Composite key uniquely identifies one essay across both graders.
            essay_id = (data['id'], data['id_prompt'], data['essay_text'])
            essay_data = {
                'prediction': data[prediction_field],
                'label': data['label'],
            }

            if data['reference'] == 'grader_a':
                grader_a_data[essay_id] = essay_data
            elif data['reference'] == 'grader_b':
                grader_b_data[essay_id] = essay_data

    # Explicit exception instead of `assert`, which is stripped under `-O`.
    if len(grader_a_data) != len(grader_b_data):
        raise ValueError("Mismatch in number of essays graded by A and B")
    return grader_a_data, grader_b_data


def compute_bootstrap_confidence_intervals_two_graders(
    grader_a_data: Dict[str, Dict],
    grader_b_data: Dict[str, Dict],
    metrics_to_compute: List[str],
    cfg: DictConfig,
    n_bootstrap: int = 1000,
    confidence_level: float = 0.95,
    random_state: int = 42,
) -> Dict[str, Tuple[float, float, float]]:
    """
    Compute bootstrap confidence intervals for specified metrics using two-grader structure.

    For each bootstrap sample:
    1. Sample essay IDs with replacement
    2. For each sampled essay ID, get both grader A and grader B predictions/labels
    3. Compute metrics separately for grader A and grader B
    4. Take the mean of the two grader metrics

    Parameters:
        grader_a_data: Dictionary mapping essay_id to prediction/label for grader A
        grader_b_data: Dictionary mapping essay_id to prediction/label for grader B
        metrics_to_compute: List of metric names to compute CIs for
        cfg: Configuration object passed through to compute_metrics
        n_bootstrap: Number of bootstrap samples
        confidence_level: Confidence level (default 0.95 for 95% CI)
        random_state: Random seed for reproducibility (None for nondeterministic)

    Returns:
        Dictionary mapping metric names to (mean, lower_bound, upper_bound).
        Metrics missing from either grader's compute_metrics output are omitted.

    Raises:
        ValueError: If the two graders do not cover the same set of essay IDs.
    """
    # Use a local RandomState instead of np.random.seed(): this yields the
    # exact same Mersenne Twister sample stream for a given seed, but avoids
    # mutating the process-wide global RNG state as a side effect.
    rng = np.random.RandomState(random_state)

    # Both graders must cover the same essays; raise instead of `assert`
    # (asserts are stripped under `python -O`).
    essay_ids = list(grader_a_data.keys())
    if set(essay_ids) != set(grader_b_data.keys()):
        raise ValueError("Essay IDs don't match between graders")

    n_essays = len(essay_ids)
    bootstrap_metrics = {metric: [] for metric in metrics_to_compute}

    for _ in tqdm(range(n_bootstrap), desc="Performing Bootstrap samples"):
        # Resample essays with replacement; the same IDs are used for both
        # graders so the pairing is preserved within each bootstrap sample.
        sampled_indices = rng.choice(n_essays, size=n_essays, replace=True)
        sampled_ids = [essay_ids[idx] for idx in sampled_indices]

        grader_a_predictions = np.array([grader_a_data[eid]['prediction'] for eid in sampled_ids])
        grader_a_labels = np.array([grader_a_data[eid]['label'] for eid in sampled_ids])
        grader_b_predictions = np.array([grader_b_data[eid]['prediction'] for eid in sampled_ids])
        grader_b_labels = np.array([grader_b_data[eid]['label'] for eid in sampled_ids])

        metrics_a = compute_metrics((grader_a_predictions, grader_a_labels), cfg)
        metrics_b = compute_metrics((grader_b_predictions, grader_b_labels), cfg)

        # Record the mean of the two graders' values for each requested metric.
        for metric in metrics_to_compute:
            if metric in metrics_a and metric in metrics_b:
                bootstrap_metrics[metric].append((metrics_a[metric] + metrics_b[metric]) / 2)

    # Percentile (non-parametric) confidence bounds.
    alpha = 1 - confidence_level
    lower_percentile = (alpha / 2) * 100
    upper_percentile = (1 - alpha / 2) * 100

    ci_results = {}
    for metric, values in bootstrap_metrics.items():
        if values:  # Skip metrics that compute_metrics never produced
            values_array = np.array(values)
            ci_results[metric] = (
                np.mean(values_array),
                np.percentile(values_array, lower_percentile),
                np.percentile(values_array, upper_percentile),
            )

    return ci_results


def save_results_to_csv(
    experiment_id: str,
    ci_results: Dict[str, Tuple[float, float, float]],
    output_path: Path,
    confidence_level: float = 0.95,
) -> None:
    """
    Save bootstrap CI results to a single-row CSV file with flattened columns.

    Parameters:
        experiment_id: Identifier written to the 'experiment_id' column.
        ci_results: Maps metric name to (mean, lower_bound, upper_bound).
        output_path: Destination CSV path (overwritten if it exists).
        confidence_level: Confidence level used only to name the bound
            columns; the default 0.95 reproduces the historical hard-coded
            '..._lower_95ci' / '..._upper_95ci' names.
    """
    from datetime import datetime

    # Column suffix, e.g. '95ci' for 0.95 or '99ci' for 0.99. Previously this
    # was hard-coded to '95ci' regardless of the CI actually computed.
    ci_tag = f"{confidence_level * 100:.0f}ci"

    # One wide row: experiment identity + timestamp + four columns per metric.
    row_data = {
        'experiment_id': experiment_id,
        'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    for metric, (mean_val, lower, upper) in ci_results.items():
        row_data[f'{metric}_mean'] = mean_val
        row_data[f'{metric}_lower_{ci_tag}'] = lower
        row_data[f'{metric}_upper_{ci_tag}'] = upper
        row_data[f'{metric}_ci_width'] = upper - lower

    pd.DataFrame([row_data]).to_csv(output_path, index=False)
    print(f"Results saved to {output_path}")


def main():
    """
    CLI entry point: compute bootstrap CIs for one experiment directory.

    # Basic usage
    python compute_bootstrap_ci.py /path/to/experiment/directory

    # With custom parameters
    python compute_bootstrap_ci.py /path/to/experiment/directory \
        --metrics QWK accuracy RMSE HDIV \
        --n-bootstrap 2000 \
        --confidence-level 0.99
    """
    parser = argparse.ArgumentParser(
        description='Compute bootstrap confidence intervals for API model inference results with two-grader structure'
    )
    parser.add_argument(
        'experiment_dir',
        type=str,
        help='Path to the experiment directory containing log and results files'
    )
    parser.add_argument(
        '--metrics',
        nargs='+',
        default=['QWK', 'Macro_F1', 'Weighted_F1'],
        help='Metrics to compute confidence intervals for (default: QWK Macro_F1 Weighted_F1)'
    )
    parser.add_argument(
        '--n-bootstrap',
        type=int,
        default=1000,
        help='Number of bootstrap samples (default: 1000)'
    )
    parser.add_argument(
        '--confidence-level',
        type=float,
        default=0.95,
        help='Confidence level for intervals (default: 0.95)'
    )
    parser.add_argument(
        '--seed',
        type=int,
        default=42,
        help='Random seed for reproducibility (default: 42)'
    )

    args = parser.parse_args()

    exp_dir = Path(args.experiment_dir)
    if not exp_dir.exists():
        raise FileNotFoundError(f"Experiment directory not found: {exp_dir}")

    # Locate the run log (holds the YAML config dump).
    log_files = list(exp_dir.glob("*run_inference_experiment.log"))
    if not log_files:
        raise FileNotFoundError(f"No log file found in {exp_dir}")
    log_path = log_files[0]

    # Locate the per-essay inference results.
    results_files = list(exp_dir.glob("*_inference_results.jsonl"))
    if not results_files:
        raise FileNotFoundError(f"No inference results file found in {exp_dir}")
    results_path = results_files[0]

    # The directory name doubles as the experiment identifier.
    experiment_id = exp_dir.name

    print(f"Processing experiment: {experiment_id}")
    print(f"Log file: {log_path}")
    print(f"Results file: {results_path}")

    try:
        # extract_config_from_log already returns an OmegaConf DictConfig,
        # so the former second OmegaConf.create() wrap was redundant.
        cfg = extract_config_from_log(log_path)
        # Prefer the seed recorded in the experiment config; fall back to CLI.
        seed = cfg.experiments.get('training_params', {}).get('seed', args.seed)
    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise RuntimeError(f"Failed to extract configuration from log file: {e}") from e

    # Load predictions and labels by grader
    grader_a_data, grader_b_data = load_inference_results_by_grader(results_path)
    print(f"Loaded {len(grader_a_data)} essays with data from both graders")

    # Compute bootstrap confidence intervals with two-grader structure
    ci_results = compute_bootstrap_confidence_intervals_two_graders(
        grader_a_data=grader_a_data,
        grader_b_data=grader_b_data,
        metrics_to_compute=args.metrics,
        cfg=cfg,
        n_bootstrap=args.n_bootstrap,
        confidence_level=args.confidence_level,
        random_state=seed,
    )

    # Also compute metrics for the full dataset (without bootstrap) for reference
    all_predictions_a = np.array([data['prediction'] for data in grader_a_data.values()])
    all_labels_a = np.array([data['label'] for data in grader_a_data.values()])
    all_predictions_b = np.array([data['prediction'] for data in grader_b_data.values()])
    all_labels_b = np.array([data['label'] for data in grader_b_data.values()])

    metrics_full_a = compute_metrics((all_predictions_a, all_labels_a), cfg)
    metrics_full_b = compute_metrics((all_predictions_b, all_labels_b), cfg)

    print("\nFull Dataset Metrics:")
    print("  Grader A:")
    for metric in args.metrics:
        if metric in metrics_full_a:
            print(f"    {metric}: {metrics_full_a[metric]:.4f}")
    print("  Grader B:")
    for metric in args.metrics:
        if metric in metrics_full_b:
            print(f"    {metric}: {metrics_full_b[metric]:.4f}")
    print("  Mean (A+B)/2:")
    for metric in args.metrics:
        if metric in metrics_full_a and metric in metrics_full_b:
            mean_val = (metrics_full_a[metric] + metrics_full_b[metric]) / 2
            print(f"    {metric}: {mean_val:.4f}")

    # Display bootstrap results
    print(f"\nBootstrap Confidence Intervals ({args.confidence_level*100:.0f}%):")
    print("  (Based on mean of grader A and B metrics)")
    for metric, (mean_val, lower, upper) in ci_results.items():
        print(f"  {metric}: {mean_val:.4f} [{lower:.4f}, {upper:.4f}]")

    # Save results
    output_path = exp_dir / "bootstrap_confidence_intervals.csv"
    save_results_to_csv(experiment_id, ci_results, output_path)


# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()