Ray0202 committed on
Commit · 2c6288c
1 Parent(s): 67678da
Update space

Browse files
- README.md +36 -25
- app.py +97 -153
- requirements.txt +0 -13
- src/about.py +16 -59
- src/display/css_html_js.py +1 -1
- src/display/utils.py +0 -110
- src/envs.py +2 -23
- src/leaderboard/read_evals.py +0 -196
- src/populate.py +0 -58
- src/submission/check_validity.py +0 -99
- src/submission/submit.py +0 -119
README.md
CHANGED
@@ -7,42 +7,53 @@ sdk: gradio
 app_file: app.py
 pinned: true
 license: apache-2.0
-short_description:
+short_description: Read-only TemporalBench leaderboard for offline evaluation results.
 sdk_version: 5.43.1
 tags:
 - leaderboard
 ---

-#
-
-Results files should have the following format and be stored as json files:
-```json
-{
-}
-```
-
-#
+# TemporalBench Leaderboard
+
+This Space is a read-only visualization and validation layer for **offline** TemporalBench results.
+It does not execute agents, call LLM APIs, or accept API keys.
+
+## Configuration
+
+- Set the local results file path via `TEMPORALBENCH_RESULTS_PATH`. Default is `data/results.json`.
+- Update descriptive text in `src/about.py`.
+
+## Results File Format
+
+Results must be a JSON list or CSV table, where each record is one agent configuration.
+Required fields per record:
+
+```json
+{
+  "model_name": "string",
+  "agent_name": "string",
+  "agent_type": "string",
+  "base_model": "string",
+  "T1_acc": 0.0,
+  "T2_acc": 0.0,
+  "T3_acc": 0.0,
+  "T4_acc": 0.0,
+  "T2_MAE": 0.0,
+  "T4_sMAPE": 0.0,
+  "Retail_T3_acc": 0.0
+}
+```
+
+Notes:
+- `T2_MAE` and `T4_sMAPE` are optional.
+- Any additional numeric columns are treated as optional domain metrics and will be shown.
+- Records must have a consistent schema and numeric metric values.
+
+## Project Structure
+
+- `app.py`: Gradio UI + leaderboard rendering
+- `src/leaderboard/load_results.py`: Load + validate results
+- `src/leaderboard/schema.py`: Identity/metric field definitions
+- `src/about.py`: Text and descriptions
+- `src/display/css_html_js.py`: Custom styling
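Before pointing the Space at a results file, it can help to sanity-check the file against the format above. The snippet below is only an illustrative check, not the Space's own loader in `src/leaderboard/load_results.py`, and it assumes the README's default path `data/results.json`:

```python
import json

import pandas as pd

REQUIRED_FIELDS = [
    "model_name", "agent_name", "agent_type", "base_model",
    "T1_acc", "T2_acc", "T3_acc", "T4_acc",
]
IDENTITY_FIELDS = ("model_name", "agent_name", "agent_type", "base_model")

# Load the results file (the README's default path).
with open("data/results.json") as fp:
    records = json.load(fp)

df = pd.DataFrame.from_records(records)

# Every required field must be present.
missing = [c for c in REQUIRED_FIELDS if c not in df.columns]
if missing:
    raise ValueError(f"Missing required fields: {missing}")

# All non-identity columns are treated as metrics and must be numeric.
non_numeric = [
    c for c in df.columns
    if c not in IDENTITY_FIELDS and not pd.api.types.is_numeric_dtype(df[c])
]
if non_numeric:
    raise ValueError(f"Metric columns must be numeric: {non_numeric}")

print(f"OK: {len(df)} records")
```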
app.py
CHANGED
@@ -1,192 +1,136 @@
 import gradio as gr
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
 import pandas as pd
-from
-from huggingface_hub import snapshot_download

 from src.about import (
     CITATION_BUTTON_LABEL,
     CITATION_BUTTON_TEXT,
-    EVALUATION_QUEUE_TEXT,
     INTRODUCTION_TEXT,
     LLM_BENCHMARKS_TEXT,
     TITLE,
 )
 from src.display.css_html_js import custom_css
-from src.
 )

-try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()

-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

-def init_leaderboard(dataframe):
     if dataframe is None or dataframe.empty:
     return Leaderboard(
         value=dataframe,
-        datatype=
         select_columns=SelectColumns(
-            default_selection=
-            cant_deselect=
             label="Select Columns to Display:",
         ),
-        search_columns=[
-        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
         filter_columns=[
-            ColumnFilter(
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
         ],
-        bool_checkboxgroup_label="Hide models",
         interactive=False,
     )

 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅
-            leaderboard = init_leaderboard(LEADERBOARD_DF)
-
-        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

             with gr.Row():
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
             )

     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):

@@ -201,4 +145,4 @@ with demo:
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
 scheduler.start()
-demo.queue(default_concurrency_limit=40).launch()
+from typing import Optional
+
 import gradio as gr
 import pandas as pd
+from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns

 from src.about import (
     CITATION_BUTTON_LABEL,
     CITATION_BUTTON_TEXT,
     INTRODUCTION_TEXT,
     LLM_BENCHMARKS_TEXT,
     TITLE,
 )
 from src.display.css_html_js import custom_css
+from src.envs import RESULTS_PATH
+from src.leaderboard.load_results import ResultsValidationError, build_dataframe, load_records
+from src.leaderboard.schema import SCHEMA
+
+
+def load_leaderboard_data() -> tuple[pd.DataFrame, list[str], Optional[str]]:
+    try:
+        records = load_records(RESULTS_PATH)
+        df, column_order = build_dataframe(records)
+        return df, column_order, None
+    except ResultsValidationError as exc:
+        fallback_cols = list(SCHEMA.identity_fields) + list(SCHEMA.required_metrics)
+        df = pd.DataFrame(columns=fallback_cols)
+        return df, fallback_cols, str(exc)
+
+
+LEADERBOARD_DF, COLUMN_ORDER, LOAD_ERROR = load_leaderboard_data()
+METRIC_COLUMNS = [c for c in COLUMN_ORDER if c not in SCHEMA.identity_fields]
+
+COMPARE_OPTIONS = []
+COMPARE_LOOKUP = {}
+for idx, row in LEADERBOARD_DF.iterrows():
+    label = (
+        f"{row['agent_name']} | {row['model_name']} | {row['agent_type']} | {row['base_model']} ({idx})"
+    )
+    COMPARE_OPTIONS.append(label)
+    COMPARE_LOOKUP[label] = row.to_dict()


+def column_types(column_order: list[str]) -> list[str]:
+    types = []
+    for col in column_order:
+        if col in SCHEMA.identity_fields:
+            types.append("str")
+        else:
+            types.append("number")
+    return types


+def init_leaderboard(dataframe, column_order):
     if dataframe is None or dataframe.empty:
+        dataframe = pd.DataFrame(columns=column_order)
+
+    required_cols = list(SCHEMA.identity_fields) + list(SCHEMA.required_metrics)
+    cant_deselect = [c for c in required_cols if c in column_order]
+
     return Leaderboard(
         value=dataframe,
+        datatype=column_types(column_order),
         select_columns=SelectColumns(
+            default_selection=column_order,
+            cant_deselect=cant_deselect,
             label="Select Columns to Display:",
         ),
+        search_columns=["model_name", "agent_name"],
         filter_columns=[
+            ColumnFilter("agent_type", type="checkboxgroup", label="Agent type"),
         ],
         interactive=False,
     )


+def compare_entries(entry_a: str, entry_b: str) -> pd.DataFrame:
+    if not entry_a or not entry_b:
+        return pd.DataFrame(columns=["metric", "entry_a", "entry_b", "delta"])
+    row_a = COMPARE_LOOKUP.get(entry_a)
+    row_b = COMPARE_LOOKUP.get(entry_b)
+    if row_a is None or row_b is None:
+        return pd.DataFrame(columns=["metric", "entry_a", "entry_b", "delta"])
+
+    rows = []
+    for metric in METRIC_COLUMNS:
+        value_a = row_a.get(metric)
+        value_b = row_b.get(metric)
+        delta = None
+        if value_a is not None and value_b is not None:
+            delta = value_b - value_a
+        rows.append(
+            {
+                "metric": metric,
+                "entry_a": value_a,
+                "entry_b": value_b,
+                "delta": delta,
+            }
+        )
+    return pd.DataFrame.from_records(rows)
+
+
 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+    if LOAD_ERROR:
+        gr.Markdown(f"**Data validation error:** {LOAD_ERROR}", elem_classes="markdown-text")

     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        with gr.TabItem("🏅 Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
+            leaderboard = init_leaderboard(LEADERBOARD_DF, COLUMN_ORDER)

+        with gr.TabItem("🔍 Compare", elem_id="llm-benchmark-tab-table", id=1):
+            gr.Markdown(
+                "Select two evaluated entries to compare their metrics side by side.",
+                elem_classes="markdown-text",
+            )
             with gr.Row():
+                entry_a = gr.Dropdown(choices=COMPARE_OPTIONS, label="Entry A", value=None)
+                entry_b = gr.Dropdown(choices=COMPARE_OPTIONS, label="Entry B", value=None)
+            compare_table = gr.Dataframe(
+                value=pd.DataFrame(columns=["metric", "entry_a", "entry_b", "delta"]),
+                headers=["metric", "entry_a", "entry_b", "delta"],
+                datatype=["str", "number", "number", "number"],
+                interactive=False,
+                row_count=10,
             )
+            entry_a.change(compare_entries, [entry_a, entry_b], compare_table)
+            entry_b.change(compare_entries, [entry_a, entry_b], compare_table)
+
+        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):

@@ -201,4 +145,4 @@ with demo:
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
 scheduler.start()
+demo.queue(default_concurrency_limit=40).launch()
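The new `app.py` imports `SCHEMA`, `load_records`, `build_dataframe`, and `ResultsValidationError` from `src/leaderboard/schema.py` and `src/leaderboard/load_results.py`, neither of which appears in this commit's diff. The sketch below is only a guess at a minimal version of those modules, inferred from how `app.py` and the README use them; the Space's actual implementations may differ:

```python
# Hypothetical sketch of src/leaderboard/schema.py and src/leaderboard/load_results.py,
# combined into one block so it is self-contained.
import json
from dataclasses import dataclass

import pandas as pd


@dataclass(frozen=True)
class Schema:
    identity_fields: tuple[str, ...]
    required_metrics: tuple[str, ...]


SCHEMA = Schema(
    identity_fields=("model_name", "agent_name", "agent_type", "base_model"),
    required_metrics=("T1_acc", "T2_acc", "T3_acc", "T4_acc"),
)


class ResultsValidationError(Exception):
    """Raised when the results file does not match the expected schema."""


def load_records(path: str) -> list[dict]:
    # The README allows a JSON list or a CSV table of records.
    if path.endswith(".csv"):
        return pd.read_csv(path).to_dict(orient="records")
    with open(path) as fp:
        records = json.load(fp)
    if not isinstance(records, list):
        raise ResultsValidationError("Results file must contain a list of records.")
    return records


def build_dataframe(records: list[dict]) -> tuple[pd.DataFrame, list[str]]:
    df = pd.DataFrame.from_records(records)
    required = list(SCHEMA.identity_fields) + list(SCHEMA.required_metrics)
    missing = [c for c in required if c not in df.columns]
    if missing:
        raise ResultsValidationError(f"Missing required fields: {missing}")
    # Extra numeric columns become optional domain metrics, as the README describes.
    extra = [c for c in df.columns if c not in required]
    column_order = required + extra
    return df[column_order], column_order
```

The real `load_results.py` presumably also enforces the README's note that metric values must be numeric and that all records share a consistent schema.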
requirements.txt
CHANGED
@@ -1,16 +1,3 @@
-APScheduler
-black
-datasets
 gradio
-gradio[oauth]
 gradio_leaderboard==0.0.13
-gradio_client
-huggingface-hub>=0.18.0
-matplotlib
-numpy
 pandas
-python-dateutil
-tqdm
-transformers
-tokenizers>=0.15.0
-sentencepiece
src/about.py
CHANGED
@@ -1,72 +1,29 @@
-
-from enum import Enum
-
-@dataclass
-class Task:
-    benchmark: str
-    metric: str
-    col_name: str
-
-
-# Select your tasks here
-# ---------------------------------------------------
-class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("anli_r1", "acc", "ANLI")
-    task1 = Task("logiqa", "acc_norm", "LogiQA")
-
-NUM_FEWSHOT = 0 # Change with your few shot
-# ---------------------------------------------------
-
-
-# Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
-
-# What does your leaderboard evaluate?
+TITLE = """<h1 align="center" id="space-title">TemporalBench Leaderboard</h1>"""
+
 INTRODUCTION_TEXT = """
+This leaderboard presents **offline** evaluation results for agent configurations on the
+TemporalBench benchmark. It is a pure visualization and validation layer: no agents are
+executed here, and no LLM APIs are called.
 """

-
-## How it works
-
-
-EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+LLM_BENCHMARKS_TEXT = """
+## What this leaderboard shows
+
+- One row per evaluated agent configuration
+- Task-family metrics for TemporalBench (T1–T4)
+- Optional domain-level metrics when provided (e.g., Retail_T3_acc)
+
+## Data requirements
+
+Results are loaded from a local JSON or CSV file. Each record must include:
+
+- Identity fields: `model_name`, `agent_name`, `agent_type`, `base_model`
+- Required metrics: `T1_acc`, `T2_acc`, `T3_acc`, `T4_acc`
+- Optional metrics: `T2_MAE`, `T4_sMAPE`, and any additional numeric columns
 """

+EVALUATION_QUEUE_TEXT = ""
+
 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 CITATION_BUTTON_TEXT = r"""
 """
src/display/css_html_js.py
CHANGED
@@ -38,7 +38,7 @@ custom_css = """
     padding: 0px;
 }

-/* Limit the width of the
+/* Limit the width of the model column so that names don't expand too much */
 #leaderboard-table td:nth-child(2),
 #leaderboard-table th:nth-child(2) {
     max-width: 400px;
src/display/utils.py
DELETED
|
@@ -1,110 +0,0 @@
|
|
| 1 |
-
from dataclasses import dataclass, make_dataclass
|
| 2 |
-
from enum import Enum
|
| 3 |
-
|
| 4 |
-
import pandas as pd
|
| 5 |
-
|
| 6 |
-
from src.about import Tasks
|
| 7 |
-
|
| 8 |
-
def fields(raw_class):
|
| 9 |
-
return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
# These classes are for user facing column names,
|
| 13 |
-
# to avoid having to change them all around the code
|
| 14 |
-
# when a modif is needed
|
| 15 |
-
@dataclass
|
| 16 |
-
class ColumnContent:
|
| 17 |
-
name: str
|
| 18 |
-
type: str
|
| 19 |
-
displayed_by_default: bool
|
| 20 |
-
hidden: bool = False
|
| 21 |
-
never_hidden: bool = False
|
| 22 |
-
|
| 23 |
-
## Leaderboard columns
|
| 24 |
-
auto_eval_column_dict = []
|
| 25 |
-
# Init
|
| 26 |
-
auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
|
| 27 |
-
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
|
| 28 |
-
#Scores
|
| 29 |
-
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
|
| 30 |
-
for task in Tasks:
|
| 31 |
-
auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
|
| 32 |
-
# Model information
|
| 33 |
-
auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
|
| 34 |
-
auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
|
| 35 |
-
auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
|
| 36 |
-
auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
|
| 37 |
-
auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
|
| 38 |
-
auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
|
| 39 |
-
auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
|
| 40 |
-
auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
|
| 41 |
-
auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
|
| 42 |
-
|
| 43 |
-
# We use make dataclass to dynamically fill the scores from Tasks
|
| 44 |
-
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
|
| 45 |
-
|
| 46 |
-
## For the queue columns in the submission tab
|
| 47 |
-
@dataclass(frozen=True)
|
| 48 |
-
class EvalQueueColumn: # Queue column
|
| 49 |
-
model = ColumnContent("model", "markdown", True)
|
| 50 |
-
revision = ColumnContent("revision", "str", True)
|
| 51 |
-
private = ColumnContent("private", "bool", True)
|
| 52 |
-
precision = ColumnContent("precision", "str", True)
|
| 53 |
-
weight_type = ColumnContent("weight_type", "str", "Original")
|
| 54 |
-
status = ColumnContent("status", "str", True)
|
| 55 |
-
|
| 56 |
-
## All the model information that we might need
|
| 57 |
-
@dataclass
|
| 58 |
-
class ModelDetails:
|
| 59 |
-
name: str
|
| 60 |
-
display_name: str = ""
|
| 61 |
-
symbol: str = "" # emoji
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
class ModelType(Enum):
|
| 65 |
-
PT = ModelDetails(name="pretrained", symbol="🟢")
|
| 66 |
-
FT = ModelDetails(name="fine-tuned", symbol="🔶")
|
| 67 |
-
IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
|
| 68 |
-
RL = ModelDetails(name="RL-tuned", symbol="🟦")
|
| 69 |
-
Unknown = ModelDetails(name="", symbol="?")
|
| 70 |
-
|
| 71 |
-
def to_str(self, separator=" "):
|
| 72 |
-
return f"{self.value.symbol}{separator}{self.value.name}"
|
| 73 |
-
|
| 74 |
-
@staticmethod
|
| 75 |
-
def from_str(type):
|
| 76 |
-
if "fine-tuned" in type or "🔶" in type:
|
| 77 |
-
return ModelType.FT
|
| 78 |
-
if "pretrained" in type or "🟢" in type:
|
| 79 |
-
return ModelType.PT
|
| 80 |
-
if "RL-tuned" in type or "🟦" in type:
|
| 81 |
-
return ModelType.RL
|
| 82 |
-
if "instruction-tuned" in type or "⭕" in type:
|
| 83 |
-
return ModelType.IFT
|
| 84 |
-
return ModelType.Unknown
|
| 85 |
-
|
| 86 |
-
class WeightType(Enum):
|
| 87 |
-
Adapter = ModelDetails("Adapter")
|
| 88 |
-
Original = ModelDetails("Original")
|
| 89 |
-
Delta = ModelDetails("Delta")
|
| 90 |
-
|
| 91 |
-
class Precision(Enum):
|
| 92 |
-
float16 = ModelDetails("float16")
|
| 93 |
-
bfloat16 = ModelDetails("bfloat16")
|
| 94 |
-
Unknown = ModelDetails("?")
|
| 95 |
-
|
| 96 |
-
def from_str(precision):
|
| 97 |
-
if precision in ["torch.float16", "float16"]:
|
| 98 |
-
return Precision.float16
|
| 99 |
-
if precision in ["torch.bfloat16", "bfloat16"]:
|
| 100 |
-
return Precision.bfloat16
|
| 101 |
-
return Precision.Unknown
|
| 102 |
-
|
| 103 |
-
# Column selection
|
| 104 |
-
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
|
| 105 |
-
|
| 106 |
-
EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
|
| 107 |
-
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
|
| 108 |
-
|
| 109 |
-
BENCHMARK_COLS = [t.value.col_name for t in Tasks]
|
| 110 |
-
|
|
|
|
src/envs.py
CHANGED
@@ -1,25 +1,4 @@
 import os

-
-
-# Info to change for your repository
-# ----------------------------------
-TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
-
-OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
-# ----------------------------------
-
-REPO_ID = f"{OWNER}/leaderboard"
-QUEUE_REPO = f"{OWNER}/requests"
-RESULTS_REPO = f"{OWNER}/results"
-
-# If you setup a cache later, just change HF_HOME
-CACHE_PATH=os.getenv("HF_HOME", ".")
-
-# Local caches
-EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
-EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
-EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
-EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
-
-API = HfApi(token=TOKEN)
+# Local results file (JSON or CSV). Override with TEMPORALBENCH_RESULTS_PATH.
+RESULTS_PATH = os.environ.get("TEMPORALBENCH_RESULTS_PATH", "data/results.json")
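Because `RESULTS_PATH` is resolved once at import time, `TEMPORALBENCH_RESULTS_PATH` has to be set before `src.envs` is imported. A small sketch, assuming the repository root is the working directory and using a made-up CSV path:

```python
import os

# Must be set before src.envs is imported, since RESULTS_PATH is read at import time.
os.environ["TEMPORALBENCH_RESULTS_PATH"] = "data/my_results.csv"  # hypothetical path

from src.envs import RESULTS_PATH

print(RESULTS_PATH)  # -> data/my_results.csv
```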
src/leaderboard/read_evals.py
DELETED
|
@@ -1,196 +0,0 @@
|
|
| 1 |
-
import glob
|
| 2 |
-
import json
|
| 3 |
-
import math
|
| 4 |
-
import os
|
| 5 |
-
from dataclasses import dataclass
|
| 6 |
-
|
| 7 |
-
import dateutil
|
| 8 |
-
import numpy as np
|
| 9 |
-
|
| 10 |
-
from src.display.formatting import make_clickable_model
|
| 11 |
-
from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
|
| 12 |
-
from src.submission.check_validity import is_model_on_hub
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
@dataclass
|
| 16 |
-
class EvalResult:
|
| 17 |
-
"""Represents one full evaluation. Built from a combination of the result and request file for a given run.
|
| 18 |
-
"""
|
| 19 |
-
eval_name: str # org_model_precision (uid)
|
| 20 |
-
full_model: str # org/model (path on hub)
|
| 21 |
-
org: str
|
| 22 |
-
model: str
|
| 23 |
-
revision: str # commit hash, "" if main
|
| 24 |
-
results: dict
|
| 25 |
-
precision: Precision = Precision.Unknown
|
| 26 |
-
model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
|
| 27 |
-
weight_type: WeightType = WeightType.Original # Original or Adapter
|
| 28 |
-
architecture: str = "Unknown"
|
| 29 |
-
license: str = "?"
|
| 30 |
-
likes: int = 0
|
| 31 |
-
num_params: int = 0
|
| 32 |
-
date: str = "" # submission date of request file
|
| 33 |
-
still_on_hub: bool = False
|
| 34 |
-
|
| 35 |
-
@classmethod
|
| 36 |
-
def init_from_json_file(self, json_filepath):
|
| 37 |
-
"""Inits the result from the specific model result file"""
|
| 38 |
-
with open(json_filepath) as fp:
|
| 39 |
-
data = json.load(fp)
|
| 40 |
-
|
| 41 |
-
config = data.get("config")
|
| 42 |
-
|
| 43 |
-
# Precision
|
| 44 |
-
precision = Precision.from_str(config.get("model_dtype"))
|
| 45 |
-
|
| 46 |
-
# Get model and org
|
| 47 |
-
org_and_model = config.get("model_name", config.get("model_args", None))
|
| 48 |
-
org_and_model = org_and_model.split("/", 1)
|
| 49 |
-
|
| 50 |
-
if len(org_and_model) == 1:
|
| 51 |
-
org = None
|
| 52 |
-
model = org_and_model[0]
|
| 53 |
-
result_key = f"{model}_{precision.value.name}"
|
| 54 |
-
else:
|
| 55 |
-
org = org_and_model[0]
|
| 56 |
-
model = org_and_model[1]
|
| 57 |
-
result_key = f"{org}_{model}_{precision.value.name}"
|
| 58 |
-
full_model = "/".join(org_and_model)
|
| 59 |
-
|
| 60 |
-
still_on_hub, _, model_config = is_model_on_hub(
|
| 61 |
-
full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
|
| 62 |
-
)
|
| 63 |
-
architecture = "?"
|
| 64 |
-
if model_config is not None:
|
| 65 |
-
architectures = getattr(model_config, "architectures", None)
|
| 66 |
-
if architectures:
|
| 67 |
-
architecture = ";".join(architectures)
|
| 68 |
-
|
| 69 |
-
# Extract results available in this file (some results are split in several files)
|
| 70 |
-
results = {}
|
| 71 |
-
for task in Tasks:
|
| 72 |
-
task = task.value
|
| 73 |
-
|
| 74 |
-
# We average all scores of a given metric (not all metrics are present in all files)
|
| 75 |
-
accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
|
| 76 |
-
if accs.size == 0 or any([acc is None for acc in accs]):
|
| 77 |
-
continue
|
| 78 |
-
|
| 79 |
-
mean_acc = np.mean(accs) * 100.0
|
| 80 |
-
results[task.benchmark] = mean_acc
|
| 81 |
-
|
| 82 |
-
return self(
|
| 83 |
-
eval_name=result_key,
|
| 84 |
-
full_model=full_model,
|
| 85 |
-
org=org,
|
| 86 |
-
model=model,
|
| 87 |
-
results=results,
|
| 88 |
-
precision=precision,
|
| 89 |
-
revision= config.get("model_sha", ""),
|
| 90 |
-
still_on_hub=still_on_hub,
|
| 91 |
-
architecture=architecture
|
| 92 |
-
)
|
| 93 |
-
|
| 94 |
-
def update_with_request_file(self, requests_path):
|
| 95 |
-
"""Finds the relevant request file for the current model and updates info with it"""
|
| 96 |
-
request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
|
| 97 |
-
|
| 98 |
-
try:
|
| 99 |
-
with open(request_file, "r") as f:
|
| 100 |
-
request = json.load(f)
|
| 101 |
-
self.model_type = ModelType.from_str(request.get("model_type", ""))
|
| 102 |
-
self.weight_type = WeightType[request.get("weight_type", "Original")]
|
| 103 |
-
self.license = request.get("license", "?")
|
| 104 |
-
self.likes = request.get("likes", 0)
|
| 105 |
-
self.num_params = request.get("params", 0)
|
| 106 |
-
self.date = request.get("submitted_time", "")
|
| 107 |
-
except Exception:
|
| 108 |
-
print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
|
| 109 |
-
|
| 110 |
-
def to_dict(self):
|
| 111 |
-
"""Converts the Eval Result to a dict compatible with our dataframe display"""
|
| 112 |
-
average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
|
| 113 |
-
data_dict = {
|
| 114 |
-
"eval_name": self.eval_name, # not a column, just a save name,
|
| 115 |
-
AutoEvalColumn.precision.name: self.precision.value.name,
|
| 116 |
-
AutoEvalColumn.model_type.name: self.model_type.value.name,
|
| 117 |
-
AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
|
| 118 |
-
AutoEvalColumn.weight_type.name: self.weight_type.value.name,
|
| 119 |
-
AutoEvalColumn.architecture.name: self.architecture,
|
| 120 |
-
AutoEvalColumn.model.name: make_clickable_model(self.full_model),
|
| 121 |
-
AutoEvalColumn.revision.name: self.revision,
|
| 122 |
-
AutoEvalColumn.average.name: average,
|
| 123 |
-
AutoEvalColumn.license.name: self.license,
|
| 124 |
-
AutoEvalColumn.likes.name: self.likes,
|
| 125 |
-
AutoEvalColumn.params.name: self.num_params,
|
| 126 |
-
AutoEvalColumn.still_on_hub.name: self.still_on_hub,
|
| 127 |
-
}
|
| 128 |
-
|
| 129 |
-
for task in Tasks:
|
| 130 |
-
data_dict[task.value.col_name] = self.results[task.value.benchmark]
|
| 131 |
-
|
| 132 |
-
return data_dict
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
def get_request_file_for_model(requests_path, model_name, precision):
|
| 136 |
-
"""Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
|
| 137 |
-
request_files = os.path.join(
|
| 138 |
-
requests_path,
|
| 139 |
-
f"{model_name}_eval_request_*.json",
|
| 140 |
-
)
|
| 141 |
-
request_files = glob.glob(request_files)
|
| 142 |
-
|
| 143 |
-
# Select correct request file (precision)
|
| 144 |
-
request_file = ""
|
| 145 |
-
request_files = sorted(request_files, reverse=True)
|
| 146 |
-
for tmp_request_file in request_files:
|
| 147 |
-
with open(tmp_request_file, "r") as f:
|
| 148 |
-
req_content = json.load(f)
|
| 149 |
-
if (
|
| 150 |
-
req_content["status"] in ["FINISHED"]
|
| 151 |
-
and req_content["precision"] == precision.split(".")[-1]
|
| 152 |
-
):
|
| 153 |
-
request_file = tmp_request_file
|
| 154 |
-
return request_file
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
|
| 158 |
-
"""From the path of the results folder root, extract all needed info for results"""
|
| 159 |
-
model_result_filepaths = []
|
| 160 |
-
|
| 161 |
-
for root, _, files in os.walk(results_path):
|
| 162 |
-
# We should only have json files in model results
|
| 163 |
-
if len(files) == 0 or any([not f.endswith(".json") for f in files]):
|
| 164 |
-
continue
|
| 165 |
-
|
| 166 |
-
# Sort the files by date
|
| 167 |
-
try:
|
| 168 |
-
files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
|
| 169 |
-
except dateutil.parser._parser.ParserError:
|
| 170 |
-
files = [files[-1]]
|
| 171 |
-
|
| 172 |
-
for file in files:
|
| 173 |
-
model_result_filepaths.append(os.path.join(root, file))
|
| 174 |
-
|
| 175 |
-
eval_results = {}
|
| 176 |
-
for model_result_filepath in model_result_filepaths:
|
| 177 |
-
# Creation of result
|
| 178 |
-
eval_result = EvalResult.init_from_json_file(model_result_filepath)
|
| 179 |
-
eval_result.update_with_request_file(requests_path)
|
| 180 |
-
|
| 181 |
-
# Store results of same eval together
|
| 182 |
-
eval_name = eval_result.eval_name
|
| 183 |
-
if eval_name in eval_results.keys():
|
| 184 |
-
eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
|
| 185 |
-
else:
|
| 186 |
-
eval_results[eval_name] = eval_result
|
| 187 |
-
|
| 188 |
-
results = []
|
| 189 |
-
for v in eval_results.values():
|
| 190 |
-
try:
|
| 191 |
-
v.to_dict() # we test if the dict version is complete
|
| 192 |
-
results.append(v)
|
| 193 |
-
except KeyError: # not all eval values present
|
| 194 |
-
continue
|
| 195 |
-
|
| 196 |
-
return results
|
|
|
|
|
|
|
src/populate.py
DELETED
@@ -1,58 +0,0 @@
-import json
-import os
-
-import pandas as pd
-
-from src.display.formatting import has_no_nan_values, make_clickable_model
-from src.display.utils import AutoEvalColumn, EvalQueueColumn
-from src.leaderboard.read_evals import get_raw_eval_results
-
-
-def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
-    """Creates a dataframe from all the individual experiment results"""
-    raw_data = get_raw_eval_results(results_path, requests_path)
-    all_data_json = [v.to_dict() for v in raw_data]
-
-    df = pd.DataFrame.from_records(all_data_json)
-    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
-    df = df[cols].round(decimals=2)
-
-    # filter out if any of the benchmarks have not been produced
-    df = df[has_no_nan_values(df, benchmark_cols)]
-    return df
-
-
-def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
-    """Creates the different dataframes for the evaluation queues requestes"""
-    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
-    all_evals = []
-
-    for entry in entries:
-        if ".json" in entry:
-            file_path = os.path.join(save_path, entry)
-            with open(file_path) as fp:
-                data = json.load(fp)
-
-            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-            data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-
-            all_evals.append(data)
-        elif ".md" not in entry:
-            # this is a folder
-            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
-            for sub_entry in sub_entries:
-                file_path = os.path.join(save_path, entry, sub_entry)
-                with open(file_path) as fp:
-                    data = json.load(fp)
-
-                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-                all_evals.append(data)
-
-    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
-    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
-    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
-    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
-    df_running = pd.DataFrame.from_records(running_list, columns=cols)
-    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-    return df_finished[cols], df_running[cols], df_pending[cols]
src/submission/check_validity.py
DELETED
|
@@ -1,99 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import os
|
| 3 |
-
import re
|
| 4 |
-
from collections import defaultdict
|
| 5 |
-
from datetime import datetime, timedelta, timezone
|
| 6 |
-
|
| 7 |
-
import huggingface_hub
|
| 8 |
-
from huggingface_hub import ModelCard
|
| 9 |
-
from huggingface_hub.hf_api import ModelInfo
|
| 10 |
-
from transformers import AutoConfig
|
| 11 |
-
from transformers.models.auto.tokenization_auto import AutoTokenizer
|
| 12 |
-
|
| 13 |
-
def check_model_card(repo_id: str) -> tuple[bool, str]:
|
| 14 |
-
"""Checks if the model card and license exist and have been filled"""
|
| 15 |
-
try:
|
| 16 |
-
card = ModelCard.load(repo_id)
|
| 17 |
-
except huggingface_hub.utils.EntryNotFoundError:
|
| 18 |
-
return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
|
| 19 |
-
|
| 20 |
-
# Enforce license metadata
|
| 21 |
-
if card.data.license is None:
|
| 22 |
-
if not ("license_name" in card.data and "license_link" in card.data):
|
| 23 |
-
return False, (
|
| 24 |
-
"License not found. Please add a license to your model card using the `license` metadata or a"
|
| 25 |
-
" `license_name`/`license_link` pair."
|
| 26 |
-
)
|
| 27 |
-
|
| 28 |
-
# Enforce card content
|
| 29 |
-
if len(card.text) < 200:
|
| 30 |
-
return False, "Please add a description to your model card, it is too short."
|
| 31 |
-
|
| 32 |
-
return True, ""
|
| 33 |
-
|
| 34 |
-
def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
|
| 35 |
-
"""Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
|
| 36 |
-
try:
|
| 37 |
-
config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
| 38 |
-
if test_tokenizer:
|
| 39 |
-
try:
|
| 40 |
-
tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
| 41 |
-
except ValueError as e:
|
| 42 |
-
return (
|
| 43 |
-
False,
|
| 44 |
-
f"uses a tokenizer which is not in a transformers release: {e}",
|
| 45 |
-
None
|
| 46 |
-
)
|
| 47 |
-
except Exception as e:
|
| 48 |
-
return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
|
| 49 |
-
return True, None, config
|
| 50 |
-
|
| 51 |
-
except ValueError:
|
| 52 |
-
return (
|
| 53 |
-
False,
|
| 54 |
-
"needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
|
| 55 |
-
None
|
| 56 |
-
)
|
| 57 |
-
|
| 58 |
-
except Exception as e:
|
| 59 |
-
return False, "was not found on hub!", None
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
def get_model_size(model_info: ModelInfo, precision: str):
|
| 63 |
-
"""Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
|
| 64 |
-
try:
|
| 65 |
-
model_size = round(model_info.safetensors["total"] / 1e9, 3)
|
| 66 |
-
except (AttributeError, TypeError):
|
| 67 |
-
return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
|
| 68 |
-
|
| 69 |
-
size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
|
| 70 |
-
model_size = size_factor * model_size
|
| 71 |
-
return model_size
|
| 72 |
-
|
| 73 |
-
def get_model_arch(model_info: ModelInfo):
|
| 74 |
-
"""Gets the model architecture from the configuration"""
|
| 75 |
-
return model_info.config.get("architectures", "Unknown")
|
| 76 |
-
|
| 77 |
-
def already_submitted_models(requested_models_dir: str) -> set[str]:
|
| 78 |
-
"""Gather a list of already submitted models to avoid duplicates"""
|
| 79 |
-
depth = 1
|
| 80 |
-
file_names = []
|
| 81 |
-
users_to_submission_dates = defaultdict(list)
|
| 82 |
-
|
| 83 |
-
for root, _, files in os.walk(requested_models_dir):
|
| 84 |
-
current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
|
| 85 |
-
if current_depth == depth:
|
| 86 |
-
for file in files:
|
| 87 |
-
if not file.endswith(".json"):
|
| 88 |
-
continue
|
| 89 |
-
with open(os.path.join(root, file), "r") as f:
|
| 90 |
-
info = json.load(f)
|
| 91 |
-
file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
|
| 92 |
-
|
| 93 |
-
# Select organisation
|
| 94 |
-
if info["model"].count("/") == 0 or "submitted_time" not in info:
|
| 95 |
-
continue
|
| 96 |
-
organisation, _ = info["model"].split("/")
|
| 97 |
-
users_to_submission_dates[organisation].append(info["submitted_time"])
|
| 98 |
-
|
| 99 |
-
return set(file_names), users_to_submission_dates
|
|
|
|
|
src/submission/submit.py
DELETED
|
@@ -1,119 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import os
|
| 3 |
-
from datetime import datetime, timezone
|
| 4 |
-
|
| 5 |
-
from src.display.formatting import styled_error, styled_message, styled_warning
|
| 6 |
-
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
|
| 7 |
-
from src.submission.check_validity import (
|
| 8 |
-
already_submitted_models,
|
| 9 |
-
check_model_card,
|
| 10 |
-
get_model_size,
|
| 11 |
-
is_model_on_hub,
|
| 12 |
-
)
|
| 13 |
-
|
| 14 |
-
REQUESTED_MODELS = None
|
| 15 |
-
USERS_TO_SUBMISSION_DATES = None
|
| 16 |
-
|
| 17 |
-
def add_new_eval(
|
| 18 |
-
model: str,
|
| 19 |
-
base_model: str,
|
| 20 |
-
revision: str,
|
| 21 |
-
precision: str,
|
| 22 |
-
weight_type: str,
|
| 23 |
-
model_type: str,
|
| 24 |
-
):
|
| 25 |
-
global REQUESTED_MODELS
|
| 26 |
-
global USERS_TO_SUBMISSION_DATES
|
| 27 |
-
if not REQUESTED_MODELS:
|
| 28 |
-
REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
|
| 29 |
-
|
| 30 |
-
user_name = ""
|
| 31 |
-
model_path = model
|
| 32 |
-
if "/" in model:
|
| 33 |
-
user_name = model.split("/")[0]
|
| 34 |
-
model_path = model.split("/")[1]
|
| 35 |
-
|
| 36 |
-
precision = precision.split(" ")[0]
|
| 37 |
-
current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
|
| 38 |
-
|
| 39 |
-
if model_type is None or model_type == "":
|
| 40 |
-
return styled_error("Please select a model type.")
|
| 41 |
-
|
| 42 |
-
# Does the model actually exist?
|
| 43 |
-
if revision == "":
|
| 44 |
-
revision = "main"
|
| 45 |
-
|
| 46 |
-
# Is the model on the hub?
|
| 47 |
-
if weight_type in ["Delta", "Adapter"]:
|
| 48 |
-
base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
|
| 49 |
-
if not base_model_on_hub:
|
| 50 |
-
return styled_error(f'Base model "{base_model}" {error}')
|
| 51 |
-
|
| 52 |
-
if not weight_type == "Adapter":
|
| 53 |
-
model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
|
| 54 |
-
if not model_on_hub:
|
| 55 |
-
return styled_error(f'Model "{model}" {error}')
|
| 56 |
-
|
| 57 |
-
# Is the model info correctly filled?
|
| 58 |
-
try:
|
| 59 |
-
model_info = API.model_info(repo_id=model, revision=revision)
|
| 60 |
-
except Exception:
|
| 61 |
-
return styled_error("Could not get your model information. Please fill it up properly.")
|
| 62 |
-
|
| 63 |
-
model_size = get_model_size(model_info=model_info, precision=precision)
|
| 64 |
-
|
| 65 |
-
# Were the model card and license filled?
|
| 66 |
-
try:
|
| 67 |
-
license = model_info.cardData["license"]
|
| 68 |
-
except Exception:
|
| 69 |
-
return styled_error("Please select a license for your model")
|
| 70 |
-
|
| 71 |
-
modelcard_OK, error_msg = check_model_card(model)
|
| 72 |
-
if not modelcard_OK:
|
| 73 |
-
return styled_error(error_msg)
|
| 74 |
-
|
| 75 |
-
# Seems good, creating the eval
|
| 76 |
-
print("Adding new eval")
|
| 77 |
-
|
| 78 |
-
eval_entry = {
|
| 79 |
-
"model": model,
|
| 80 |
-
"base_model": base_model,
|
| 81 |
-
"revision": revision,
|
| 82 |
-
"precision": precision,
|
| 83 |
-
"weight_type": weight_type,
|
| 84 |
-
"status": "PENDING",
|
| 85 |
-
"submitted_time": current_time,
|
| 86 |
-
"model_type": model_type,
|
| 87 |
-
"likes": model_info.likes,
|
| 88 |
-
"params": model_size,
|
| 89 |
-
"license": license,
|
| 90 |
-
"private": False,
|
| 91 |
-
}
|
| 92 |
-
|
| 93 |
-
# Check for duplicate submission
|
| 94 |
-
if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
|
| 95 |
-
return styled_warning("This model has been already submitted.")
|
| 96 |
-
|
| 97 |
-
print("Creating eval file")
|
| 98 |
-
OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
|
| 99 |
-
os.makedirs(OUT_DIR, exist_ok=True)
|
| 100 |
-
out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
|
| 101 |
-
|
| 102 |
-
with open(out_path, "w") as f:
|
| 103 |
-
f.write(json.dumps(eval_entry))
|
| 104 |
-
|
| 105 |
-
print("Uploading eval file")
|
| 106 |
-
API.upload_file(
|
| 107 |
-
path_or_fileobj=out_path,
|
| 108 |
-
path_in_repo=out_path.split("eval-queue/")[1],
|
| 109 |
-
repo_id=QUEUE_REPO,
|
| 110 |
-
repo_type="dataset",
|
| 111 |
-
commit_message=f"Add {model} to eval queue",
|
| 112 |
-
)
|
| 113 |
-
|
| 114 |
-
# Remove the local file
|
| 115 |
-
os.remove(out_path)
|
| 116 |
-
|
| 117 |
-
return styled_message(
|
| 118 |
-
"Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
|
| 119 |
-
)
|
|
|
|
|