import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results, AUTO_EVAL_FIELDS


def get_leaderboard_df(
    results_path: str,
    requests_path: str,
    cols: list,
    benchmark_cols: list,
) -> pd.DataFrame:
    """
    Build leaderboard DataFrame from eval JSONs.

    Defensive behavior:
    - Never raises KeyError due to missing columns
    - Ensures benchmark columns always exist
    - Keeps benchmark columns available for downstream analytics
    """
    print(">>> ENTERING get_leaderboard_df")

    raw_data = get_raw_eval_results(results_path, requests_path)
    print("Raw eval results length:", len(raw_data))
    print("Sample raw data:", raw_data[0] if raw_data else "empty")

    if not raw_data:
        # Return empty dataframe with expected structure
        base_cols = list(dict.fromkeys(cols + benchmark_cols))
        return pd.DataFrame(columns=base_cols)

    # Convert objects to dictionaries
    records = []
    for r in raw_data:
        try:
            records.append(r.to_dict())
        except Exception:
            continue

    if not records:
        base_cols = list(dict.fromkeys(cols + benchmark_cols))
        return pd.DataFrame(columns=base_cols)

    df = pd.DataFrame.from_records(records)
    print("RAW RECORD COUNT:", len(records))
    print("DF SHAPE AFTER CREATION:", df.shape)
    print("DF COLUMNS AFTER CREATION:", df.columns.tolist())

    # Ensure IS_FS exists for UI compatibility
    if "IS_FS" not in df.columns:
        if AUTO_EVAL_FIELDS.get("is_5fewshot") in df.columns:
            df["IS_FS"] = df[AUTO_EVAL_FIELDS["is_5fewshot"]]
        elif "is_5fewshot" in df.columns:
            df["IS_FS"] = df["is_5fewshot"]
        else:
            df["IS_FS"] = False

    # Ensure #Params (B) exists for chart
    if 'params' in df.columns:
        df['#Params (B)'] = df['params']
    else:
        df['#Params (B)'] = float('nan')
    print("------")
    print(df['#Params (B)'])

    # Ensure display name for average performance exists
    if 'average' in df.columns:
        df['Avg. Comb. Perf. ⬆️'] = df['average']
    else:
        df['Avg. Comb. Perf. ⬆️'] = float('nan')
    # Ensure display column 'Model' exists
    if 'model' in df.columns:
        df['Model'] = df['model']
    else:
        df['Model'] = ''

    # ------------------------------------------------------------------
    # Ensure ALL benchmark columns exist (even if missing in JSON files)
    # ------------------------------------------------------------------
    for bcol in benchmark_cols:
        if bcol not in df.columns:
            df[bcol] = float("nan")

    # ------------------------------------------------------------------
    # Ensure requested display columns exist
    # ------------------------------------------------------------------
    for col in cols:
        if col not in df.columns:
            df[col] = float("nan")

    # ------------------------------------------------------------------
    # Sort by average if available
    # ------------------------------------------------------------------
    avg_col = AUTO_EVAL_FIELDS.get("average", "average")
    if avg_col in df.columns:
        df = df.sort_values(by=avg_col, ascending=False)

    # ------------------------------------------------------------------
    # Filter rows only if benchmark columns actually exist
    # ------------------------------------------------------------------
    existing_benchmarks = [c for c in benchmark_cols if c in df.columns]
    # if existing_benchmarks:
    #     try:
    #         mask = has_no_nan_values(df, existing_benchmarks)
    #         df = df[mask]
    #     except Exception:
    #         pass

    print("DF shape after loading JSONs, before benchmark filtering:", df.shape)
    print("DF columns:", df.columns.tolist())
    print("Non-null REL count:", df['REL'].notna().sum() if 'REL' in df else "REL missing")

    # ------------------------------------------------------------------
    # Build final column order (keep benchmarks for analytics)
    # ------------------------------------------------------------------
    final_cols = list(dict.fromkeys(
        cols
        + benchmark_cols
        + ['Rank', 'Size', 'LANG', 'IS_FS', '#Params (B)', 'Avg. Comb. Perf. ⬆️', 'Model']
    ))
    final_cols = [c for c in final_cols if c in df.columns]
    df = df[final_cols]

    # ------------------------------------------------------------------
    # Round numeric columns safely
    # ------------------------------------------------------------------
    for c in df.columns:
        if pd.api.types.is_numeric_dtype(df[c]):
            df[c] = df[c].round(2)

    df = df.reset_index(drop=True)
    return df


def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests."""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            # Request file at the top level of the queue directory
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # This is a folder: collect the request files it contains
            sub_entries = [
                e
                for e in os.listdir(os.path.join(save_path, entry))
                if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")
            ]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    # Split the requests by evaluation status
    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]

    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
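
# A minimal local usage sketch of the two builders above, assuming this module's
# repository layout. The paths "eval-results" and "eval-queue" and the example
# column lists are hypothetical placeholders; in the running app these values come
# from the configuration modules (e.g. src.envs, src.display.utils) rather than
# being hard-coded here.
if __name__ == "__main__":
    example_cols = ["Model", "Avg. Comb. Perf. ⬆️", "#Params (B)"]  # assumed display columns
    example_benchmark_cols = ["REL"]                                # assumed benchmark columns

    leaderboard_df = get_leaderboard_df(
        results_path="eval-results",   # hypothetical local results directory
        requests_path="eval-queue",    # hypothetical local requests directory
        cols=example_cols,
        benchmark_cols=example_benchmark_cols,
    )
    print(leaderboard_df.head())

    finished_df, running_df, pending_df = get_evaluation_queue_df(
        "eval-queue", cols=["model", "revision", "status"]
    )
    print(len(finished_df), "finished |", len(running_df), "running |", len(pending_df), "pending")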