Spaces:
Runtime error
Runtime error
Commit
·
0637d24
1
Parent(s):
6992c96
add
Browse files
- app.py +1 -1
- src/leaderboard/read_evals.py +3 -2
- src/populate.py +1 -0
app.py
CHANGED
|
@@ -62,7 +62,7 @@ def init_leaderboard(dataframe):
|
|
| 62 |
if dataframe is None or dataframe.empty:
|
| 63 |
raise ValueError("Leaderboard DataFrame is empty or None.")
|
| 64 |
|
| 65 |
-
pdb.set_trace()
|
| 66 |
num_cols = dataframe.select_dtypes(include=['float']).columns
|
| 67 |
styler = dataframe.style.format({col: "{:.1f}" for col in num_cols})
|
| 68 |
return gr.components.Dataframe(
|
|
|
|
| 62 |
if dataframe is None or dataframe.empty:
|
| 63 |
raise ValueError("Leaderboard DataFrame is empty or None.")
|
| 64 |
|
| 65 |
+
# pdb.set_trace()
|
| 66 |
num_cols = dataframe.select_dtypes(include=['float']).columns
|
| 67 |
styler = dataframe.style.format({col: "{:.1f}" for col in num_cols})
|
| 68 |
return gr.components.Dataframe(
|
src/leaderboard/read_evals.py
CHANGED
|
@@ -6,6 +6,7 @@ from dataclasses import dataclass
|
|
| 6 |
|
| 7 |
import dateutil
|
| 8 |
import numpy as np
|
|
|
|
| 9 |
|
| 10 |
from src.display.formatting import make_clickable_model
|
| 11 |
from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType, Metrics
|
|
@@ -28,7 +29,7 @@ class EvalResult:
|
|
| 28 |
# architecture: str = "Unknown"
|
| 29 |
# license: str = "?"
|
| 30 |
# likes: int = 0
|
| 31 |
-
num_params: int =
|
| 32 |
# date: str = "" # submission date of request file
|
| 33 |
# still_on_hub: bool = False
|
| 34 |
link: str = ''
|
|
@@ -48,7 +49,7 @@ class EvalResult:
|
|
| 48 |
org_and_model = config.get("model_name", config.get("model_args", None))
|
| 49 |
org_and_model = org_and_model.split("/", 1)
|
| 50 |
link = config.get("link", '')
|
| 51 |
-
params = config.get("Params",
|
| 52 |
|
| 53 |
if len(org_and_model) == 1:
|
| 54 |
org = None
|
|
|
|
| 6 |
|
| 7 |
import dateutil
|
| 8 |
import numpy as np
|
| 9 |
+
import pandas as pd
|
| 10 |
|
| 11 |
from src.display.formatting import make_clickable_model
|
| 12 |
from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType, Metrics
|
|
|
|
| 29 |
# architecture: str = "Unknown"
|
| 30 |
# license: str = "?"
|
| 31 |
# likes: int = 0
|
| 32 |
+
num_params: int = pd.NA
|
| 33 |
# date: str = "" # submission date of request file
|
| 34 |
# still_on_hub: bool = False
|
| 35 |
link: str = ''
|
|
|
|
| 49 |
org_and_model = config.get("model_name", config.get("model_args", None))
|
| 50 |
org_and_model = org_and_model.split("/", 1)
|
| 51 |
link = config.get("link", '')
|
| 52 |
+
params = config.get("Params", pd.NA)
|
| 53 |
|
| 54 |
if len(org_and_model) == 1:
|
| 55 |
org = None
|
src/populate.py
CHANGED
|
@@ -10,6 +10,7 @@ from src.leaderboard.read_evals import get_raw_eval_results
|
|
| 10 |
|
| 11 |
def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list, task) -> pd.DataFrame:
|
| 12 |
"""Creates a dataframe from all the individual experiment results"""
|
|
|
|
| 13 |
raw_data = get_raw_eval_results(results_path, requests_path, task)
|
| 14 |
all_data_json = [v.to_dict() for v in raw_data]
|
| 15 |
|
|
|
|
| 10 |
|
| 11 |
def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list, task) -> pd.DataFrame:
|
| 12 |
"""Creates a dataframe from all the individual experiment results"""
|
| 13 |
+
# import pdb; pdb.set_trace()
|
| 14 |
raw_data = get_raw_eval_results(results_path, requests_path, task)
|
| 15 |
all_data_json = [v.to_dict() for v in raw_data]
|
| 16 |
|