import json
import os

import numpy as np
import pandas as pd
import yaml
from sklearn.metrics import cohen_kappa_score

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results

TYPES = ["str", "number", "number", "number", "number", "number"]

def read_json(file_path: str) -> list[dict]:
    """
    Read a JSON or JSONL file and return its contents as a list of dictionaries.

    Parameters:
        file_path (str): The path to the JSON file.

    Returns:
        list[dict]: The contents of the file as a list of dictionaries.
    """
    try:
        # First assume JSON-lines: one JSON object per line.
        with open(file_path) as f:
            return [json.loads(x) for x in f]
    except json.decoder.JSONDecodeError:
        # Fall back to a regular JSON file.
        with open(file_path) as f:
            return json.load(f)

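# Illustrative usage (the file name below is hypothetical): both a JSON-lines file
# and a regular JSON array load to the same list-of-dicts shape.
#
#   rows = read_json("./predictions/some_model.jsonl")
#   isinstance(rows, list) and isinstance(rows[0], dict)  # True
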
def pairwise_compare(
    evaluator1_dir: str,
    evaluator2_dir: str,
) -> tuple[float, float]:
    """
    Compare the verdicts of two pairwise evaluators.

    Args:
        evaluator1_dir: The path to the file containing the responses from the first evaluator.
        evaluator2_dir: The path to the file containing the responses from the second evaluator.

    Returns:
        tuple[float, float]: The raw agreement (accuracy) and Cohen's kappa.
    """
    evaluator1_responses = read_json(evaluator1_dir)
    evaluator2_responses = read_json(evaluator2_dir)
    assert len(evaluator1_responses) == len(evaluator2_responses)
    evaluator1_winners = np.array(
        [response["winner"] for response in evaluator1_responses]
    )
    evaluator2_winners = np.array(
        [response["winner"] for response in evaluator2_responses]
    )
    # Fraction of examples where both evaluators pick the same winner.
    acc = (evaluator1_winners == evaluator2_winners).mean().item()
    # Chance-corrected agreement between the two sets of verdicts.
    agreement = cohen_kappa_score(evaluator1_winners, evaluator2_winners)
    return acc, agreement

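# Illustrative sketch (not used by the app): how raw accuracy and Cohen's kappa
# differ on a tiny, made-up set of verdicts. The labels below are hypothetical.
def _example_agreement() -> tuple[float, float]:
    human = np.array(["A", "B", "A", "A", "B"])
    model = np.array(["A", "B", "B", "A", "B"])
    acc = (human == model).mean().item()      # 0.8: share of matching verdicts
    kappa = cohen_kappa_score(human, model)   # ~0.62: lower, since chance agreement is discounted
    return acc, kappa
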
def pairwise_meta_eval(
    human_dir: str,
    model_dir: str,
    model_dir_swap: str,
) -> tuple[float, float, float, float]:
    """
    Evaluate a pairwise evaluator against human judgments.

    Args:
        human_dir: The path to the file containing the human responses.
        model_dir: The path to the file containing the model responses.
        model_dir_swap: The path to the file containing the model responses with swapped inputs.

    Returns:
        tuple[float, float, float, float]: The human-model accuracy and agreement
        (averaged over the original and swapped orderings), and the model's
        self-accuracy and self-agreement between the two orderings.
    """
    acc, agr = pairwise_compare(human_dir, model_dir)
    swap_acc, swap_agr = pairwise_compare(
        human_dir, model_dir_swap,
    )
    # Average over the two candidate orderings to reduce position bias.
    acc = (acc + swap_acc) / 2
    agr = (agr + swap_agr) / 2
    # Consistency of the model with itself when the candidates are swapped.
    models_acc, models_agr = pairwise_compare(
        model_dir, model_dir_swap,
    )
    return acc, agr, models_acc, models_agr

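# Illustrative call (the paths follow the layout used in load_leaderboard below;
# "some_model" is a hypothetical fdir value):
#
#   acc, agr, self_acc, self_agr = pairwise_meta_eval(
#       "./data/instrusum.json",
#       "./predictions/some_model.jsonl",
#       "./predictions/some_model_swap.jsonl",
#   )
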
def load_leaderboard() -> pd.DataFrame:
    """Loads the leaderboard from the file system"""
    with open("./data/models.yaml") as fp:
        models = yaml.safe_load(fp)
    predictions = {
        k: []
        for k in ["Model", "Accuracy", "Agreement", "Self-Accuracy", "Self-Agreement"]
    }
    for model in models:
        fdir = model["fdir"]
        # Score each model's verdicts (original and swapped orderings) against the
        # human annotations.
        acc, agr, models_acc, models_agr = pairwise_meta_eval(
            "./data/instrusum.json",
            f"./predictions/{fdir}.jsonl",
            f"./predictions/{fdir}_swap.jsonl",
        )
        predictions["Model"].append(model["name"])
        predictions["Accuracy"].append(acc)
        predictions["Agreement"].append(agr)
        predictions["Self-Accuracy"].append(models_acc)
        predictions["Self-Agreement"].append(models_agr)
    return pd.DataFrame(predictions)

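# Sketch of the expected ./data/models.yaml layout, inferred from the keys read
# above ("name" and "fdir"); the entries themselves are hypothetical:
#
#   - name: Some Judge Model
#     fdir: some_judge_model
#   - name: Another Judge Model
#     fdir: another_judge_model
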
def get_leaderboard_df(
    results_path: str, requests_path: str, cols: list, benchmark_cols: list
) -> tuple[list, pd.DataFrame]:
    """Creates a dataframe from all the individual experiment results"""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[cols].round(decimals=2)

    # Filter out rows where any of the benchmarks have not been produced.
    df = df[has_no_nan_values(df, benchmark_cols)]
    return raw_data, df

def get_evaluation_queue_df(
    save_path: str, cols: list
) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests"""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")
            all_evals.append(data)
        elif ".md" not in entry:
            # This is a folder: collect every request file inside it.
            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    # Split the requests by status into pending, running, and finished queues.
    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
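
# Sketch of a single request file consumed above; only the keys actually read
# ("model", "revision", "status") are shown, and the values are hypothetical:
#
#   {
#       "model": "org/model-name",
#       "revision": "main",
#       "status": "PENDING"
#   }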