Spaces:
Running
Running
| import pandas as pd | |
| import os | |
| from src.display.utils import AutoEvalColumn, ModelType | |
| from src.about import Tasks | |
def get_leaderboard_df(cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Load the static results.csv and shape it for leaderboard display.

    Args:
        cols: Ordered display-column names the returned frame must contain.
        benchmark_cols: Benchmark display names (not used in the body; kept
            for the caller-facing interface).

    Returns:
        A DataFrame restricted to ``cols``, sorted by pass rate (descending,
        when the column is present) and rounded to two decimals.
    """
    # results.csv lives one directory above this module's package.
    results_path = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), "results.csv"
    )
    leaderboard = pd.read_csv(results_path)

    # Replace each raw model_type string with its symbol-decorated form.
    leaderboard["model_type"] = leaderboard["model_type"].apply(
        lambda raw: ModelType.from_str(str(raw)).to_str()
    )

    # Best pass rate first; skip silently if the csv lacks the column.
    if "pass_rate" in leaderboard.columns:
        leaderboard = leaderboard.sort_values(by=["pass_rate"], ascending=False)

    # Map csv column names onto the display names AutoEvalColumn expects.
    column_renames = {
        "model": AutoEvalColumn.model.name,
        "model_type": AutoEvalColumn.model_type.name,
        "params": AutoEvalColumn.params.name,
        "license": AutoEvalColumn.license.name,
    }
    for task in Tasks:
        # benchmark: csv column name (e.g. "pass_rate");
        # col_name: display label (e.g. "Pass Rate (%)").
        column_renames[task.value.benchmark] = task.value.col_name
    leaderboard = leaderboard.rename(columns=column_renames)

    # Guarantee every requested column exists; absent ones are backfilled
    # with None so the selection below cannot raise a KeyError.
    for col in cols:
        if col not in leaderboard.columns:
            leaderboard[col] = None

    return leaderboard[cols].round(decimals=2)