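"""Score binary predictions of whether an intervention is stabilizing.

Compares a predictions CSV against a ground-truth CSV keyed by scenario_id,
validates both files, and prints a JSON report with accuracy, precision,
recall, F1, two domain-specific rates, the confusion matrix, and support
counts.
"""
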
import json
import sys
import pandas as pd
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
REQ_PRED = {"scenario_id", "prediction"}
REQ_TRUTH = {"scenario_id", "label_intervention_stabilizing"}
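
# Illustrative input shapes (hypothetical file names and scenario IDs;
# predictions and labels must be coded as 0/1):
#
#   predictions.csv              truth.csv
#   scenario_id,prediction       scenario_id,label_intervention_stabilizing
#   s-001,1                      s-001,1
#   s-002,0                      s-002,0
#
# Invoke as: python scorer.py predictions.csv truth.csv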

def load_csv(path):
    return pd.read_csv(path)

def validate_columns(df, req, name):
    missing = req - set(df.columns)
    if missing:
        raise ValueError(f"{name} missing columns: {sorted(missing)}")

def validate_ids(df, name):
    if df["scenario_id"].isna().any():
        raise ValueError(f"{name} has missing scenario_id values")
    ids = df["scenario_id"].astype(str).str.strip()
    if (ids == "").any():
        raise ValueError(f"{name} has blank scenario_id values")
    if ids.duplicated().any():
        dupes = ids[ids.duplicated()].unique().tolist()
        raise ValueError(f"{name} has duplicate scenario_id values: {dupes[:10]}")
    df = df.copy()
    df["scenario_id"] = ids
    return df

def validate_binary(series, name):
    values = set(series.dropna().tolist())
    if not values.issubset({0, 1}):
        raise ValueError(f"{name} must contain only 0/1 values, found: {sorted(values)}")

def prepare_prediction_df(pred_df):
    validate_columns(pred_df, REQ_PRED, "predictions")
    pred_df = validate_ids(pred_df, "predictions")
    if pred_df["prediction"].isna().any():
        raise ValueError("predictions has missing prediction values")
    pred_df = pred_df.copy()
    pred_df["prediction"] = pred_df["prediction"].astype(int)
    validate_binary(pred_df["prediction"], "prediction")
    return pred_df[["scenario_id", "prediction"]]

def prepare_truth_df(truth_df):
    validate_columns(truth_df, REQ_TRUTH, "truth")
    truth_df = validate_ids(truth_df, "truth")
    if truth_df["label_intervention_stabilizing"].isna().any():
        raise ValueError("truth has missing label_intervention_stabilizing values")
    truth_df = truth_df.copy()
    truth_df["label_intervention_stabilizing"] = truth_df["label_intervention_stabilizing"].astype(int)
    validate_binary(truth_df["label_intervention_stabilizing"], "label_intervention_stabilizing")
    return truth_df

def confusion_counts(y_true, y_pred):
    tp = int(((y_true == 1) & (y_pred == 1)).sum())
    tn = int(((y_true == 0) & (y_pred == 0)).sum())
    fp = int(((y_true == 0) & (y_pred == 1)).sum())
    fn = int(((y_true == 1) & (y_pred == 0)).sum())
    return tp, tn, fp, fn

def score(predictions_path, truth_path):
    pred_df = load_csv(predictions_path)
    truth_df = load_csv(truth_path)
    pred_df = prepare_prediction_df(pred_df)
    truth_df = prepare_truth_df(truth_df)
    # Require the two files to cover exactly the same set of scenario IDs.
    pred_ids = set(pred_df["scenario_id"])
    truth_ids = set(truth_df["scenario_id"])
    if pred_ids != truth_ids:
        only_pred = sorted(pred_ids - truth_ids)[:10]
        only_truth = sorted(truth_ids - pred_ids)[:10]
        raise ValueError(
            "scenario_id mismatch between predictions and truth. "
            f"Only in predictions: {only_pred}. Only in truth: {only_truth}."
        )
    merged = truth_df.merge(pred_df, on="scenario_id", how="inner")
    merged = merged.sort_values("scenario_id").reset_index(drop=True)
    y_true = merged["label_intervention_stabilizing"]
    y_pred = merged["prediction"]
    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred, zero_division=0)
    recall = recall_score(y_true, y_pred, zero_division=0)
    f1 = f1_score(y_true, y_pred, zero_division=0)
    tp, tn, fp, fn = confusion_counts(y_true, y_pred)
    # Domain-named rates: recall over stabilizing (label == 1) scenarios, and
    # the rate at which harmful (label == 0) scenarios are predicted effective.
    successful = merged[merged["label_intervention_stabilizing"] == 1]
    harmful = merged[merged["label_intervention_stabilizing"] == 0]
    recall_stabilizing_interventions = (
        len(successful[successful["prediction"] == 1]) / max(len(successful), 1)
    )
    false_effective_intervention_rate = (
        len(harmful[harmful["prediction"] == 1]) / max(len(harmful), 1)
    )
    results = {
        "accuracy": accuracy,
        "precision": precision,
        "recall": recall,
        "f1": f1,
        "recall_stabilizing_interventions": recall_stabilizing_interventions,
        "false_effective_intervention_rate": false_effective_intervention_rate,
        "confusion_matrix": {
            "true_positive": tp,
            "true_negative": tn,
            "false_positive": fp,
            "false_negative": fn,
        },
        "support": {
            "total_rows": int(len(merged)),
            "positive_rows": int((y_true == 1).sum()),
            "negative_rows": int((y_true == 0).sum()),
        },
    }
    return results

def main():
    if len(sys.argv) != 3:
        print(
            "Usage: python scorer.py <predictions.csv> <truth.csv>",
            file=sys.stderr,
        )
        sys.exit(1)
    predictions_path = sys.argv[1]
    truth_path = sys.argv[2]
    results = score(predictions_path, truth_path)
    print(json.dumps(results, indent=2))

if __name__ == "__main__":
    main()