doubao-bench committed
Commit 0339608 · 1 Parent(s): f10384f
app.py CHANGED
@@ -69,22 +69,9 @@ def init_leaderboard(dataframe):
            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
            label="Select Columns to Display:",
        ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
+        search_columns=[AutoEvalColumn.model.name],
        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
-        ],
+        filter_columns=[],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )
@@ -102,102 +89,32 @@ with demo:
        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
-
-    with gr.Row():
-        with gr.Accordion("📙 Citation", open=False):
-            citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
-                label=CITATION_BUTTON_LABEL,
-                lines=20,
-                elem_id="citation-button",
-                show_copy_button=True,
-            )
+        with gr.TabItem("🚀 Submit", elem_id="llm-benchmark-tab-table", id=3):
+            gr.Markdown("""
+We welcome community submissions of new model evaluation results. These submissions will be listed as 'External', and authors must upload their generated outputs for peer review.
+
+## Evaluation
+See the evaluation [Setup](https://huggingface.co/docs/hub/spaces-overview) and [Usage](https://huggingface.co/docs/hub/spaces-overview) guides; running the evaluation generates a markdown report summarizing the results.
+
+## Submission
+To submit your results, create a Pull Request in the [Community Tab](https://huggingface.co/spaces/doubao-bench/web-bench-leaderboard/discussions) to add them to the `src/custom-eval-results` folder in this repository:
+
+* Create a new folder named with your provider and model names (e.g., `ollama_mistral-small`, using underscores to separate parts).
+* Each folder stores the evaluation results of only one model.
+* Add a `base_meta.json` file with the following fields:
+  * **Model**: the name of your model
+  * **Model Link**: the link to the model page
+  * **Provider**: the name of the provider
+  * **Openness**: the openness of the model
+  * **Agent**: the agent used for evaluation, `Web-Agent` or your custom agent name
+* Put your generated reports (e.g. `eval-20258513-102235.zip`) in your folder.
+* The title of the PR should be: `[Community Submission] Model: org/model, Username: your_username`.
+* **Tip**: `gen_meta.json` will be created after our review.
+
+We will promptly review and merge your submission. Once the review is complete, we will publish the results on the leaderboard.
+            """)
+
+

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
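The submission instructions in the new Submit tab above ask contributors to add a `base_meta.json` to their results folder. As a purely hypothetical sketch (the exact key spelling and accepted values are not shown in this commit; the keys below simply mirror the bullet names), such a file could be produced like this:

```python
# Hypothetical base_meta.json for a submission folder such as
# src/custom-eval-results/ollama_mistral-small/ -- every value here is a placeholder.
import json

base_meta = {
    "Model": "mistral-small",                        # the name of your model
    "Model Link": "https://example.com/your-model",  # the link to the model page
    "Provider": "ollama",                            # the name of the provider
    "Openness": "open-weights",                      # the openness of the model
    "Agent": "Web-Agent",                            # "Web-Agent" or your custom agent name
}

with open("base_meta.json", "w", encoding="utf-8") as f:
    json.dump(base_meta, f, indent=2, ensure_ascii=False)
```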
eval-queue/GPT-5-High_eval_request_float16.json ADDED
@@ -0,0 +1,8 @@
+{
+    "model": "GPT-5-High",
+    "precision": "float16",
+    "status": "FINISHED",
+    "model_type": "pretrained",
+    "submit_type": "official",
+    "report": "https://openai.com/gpt-5"
+}
eval-queue/gemini-2.5-Pro_eval_request_float16.json ADDED
@@ -0,0 +1,8 @@
+{
+    "model": "gemini-2.5-Pro",
+    "precision": "float16",
+    "status": "FINISHED",
+    "model_type": "pretrained",
+    "submit_type": "official",
+    "report": "https://google.ai/gemini"
+}
eval-results/GPT-5-High.json ADDED
@@ -0,0 +1,14 @@
+{
+    "config": {
+        "model_name": "GPT-5-High",
+        "model_dtype": "float16"
+    },
+    "results": {
+        "anli_r1": {
+            "acc": 0.98
+        },
+        "logiqa": {
+            "acc_norm": 0.96
+        }
+    }
+}
eval-results/gemini-2.5-Pro.json ADDED
@@ -0,0 +1,14 @@
+{
+    "config": {
+        "model_name": "gemini-2.5-Pro",
+        "model_dtype": "float16"
+    },
+    "results": {
+        "anli_r1": {
+            "acc": 0.95
+        },
+        "logiqa": {
+            "acc_norm": 0.92
+        }
+    }
+}
src/about.py CHANGED
@@ -30,10 +30,8 @@ Intro text

# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f"""
-## How it works
-
-## Reproducibility
-To reproduce our results, here is the commands you can run:
+## More Information
+More information can be found in the [Paper](https://huggingface.co/docs/safetensors/index) or on [GitHub](https://huggingface.co/docs/safetensors/index).

"""

src/display/utils.py CHANGED
@@ -23,22 +23,13 @@ class ColumnContent:
## Leaderboard columns
auto_eval_column_dict = []
# Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", True, never_hidden=True)])
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
#Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
-for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("score", "number", True)])
# Model information
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+auto_eval_column_dict.append(["submit_type", ColumnContent, ColumnContent("submit_type", "str", True)])
+auto_eval_column_dict.append(["report", ColumnContent, ColumnContent("report", "str", True)])

# We use make dataclass to dynamically fill the scores from Tasks
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
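To make the `make_dataclass` pattern above concrete, here is a minimal, self-contained sketch of how the rebuilt `auto_eval_column_dict` becomes the `AutoEvalColumn` class and how other modules address columns by name. The `ColumnContent` decorator arguments and the `fields()` helper are not shown in this diff, so both are assumptions that merely mirror the leaderboard template:

```python
from dataclasses import dataclass, make_dataclass


@dataclass(frozen=True)
class ColumnContent:  # stand-in mirroring the ColumnContent dataclass in src/display/utils.py
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False


def fields(raw_class):  # stand-in for the template's own fields() helper (an assumption)
    return [v for k, v in raw_class.__dict__.items() if not k.startswith("__")]


# The same (field_name, field_type, default) triples as in the diff above.
auto_eval_column_dict = [
    ["model_type", ColumnContent, ColumnContent("Type", "str", True, never_hidden=True)],
    ["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)],
    ["average", ColumnContent, ColumnContent("score", "number", True)],
    ["submit_type", ColumnContent, ColumnContent("submit_type", "str", True)],
    ["report", ColumnContent, ColumnContent("report", "str", True)],
]

# The defaults become class attributes, so columns are addressed as AutoEvalColumn.<field>.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.average.name)                                  # "score"
print([c.name for c in fields(AutoEvalColumn) if c.never_hidden])   # ["Type", "Model"]
```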
src/leaderboard/read_evals.py CHANGED
@@ -31,6 +31,8 @@ class EvalResult:
    num_params: int = 0
    date: str = "" # submission date of request file
    still_on_hub: bool = False
+    submit_type: str = ""
+    report: str = ""

    @classmethod
    def init_from_json_file(self, json_filepath):
@@ -104,6 +106,8 @@ class EvalResult:
            self.likes = request.get("likes", 0)
            self.num_params = request.get("params", 0)
            self.date = request.get("submitted_time", "")
+            self.submit_type = request.get("submit_type", "")
+            self.report = request.get("report", "")
        except Exception:
            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")

@@ -112,18 +116,11 @@
        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
        data_dict = {
            "eval_name": self.eval_name, # not a column, just a save name,
-            AutoEvalColumn.precision.name: self.precision.value.name,
            AutoEvalColumn.model_type.name: self.model_type.value.name,
-            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
-            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
-            AutoEvalColumn.architecture.name: self.architecture,
            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
-            AutoEvalColumn.revision.name: self.revision,
            AutoEvalColumn.average.name: average,
-            AutoEvalColumn.license.name: self.license,
-            AutoEvalColumn.likes.name: self.likes,
-            AutoEvalColumn.params.name: self.num_params,
-            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
+            AutoEvalColumn.submit_type.name: self.submit_type,
+            AutoEvalColumn.report.name: self.report,
        }

        for task in Tasks:
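As a hedged worked example of the averaging kept above: for the `eval-results/GPT-5-High.json` file added in this commit, and assuming the `Tasks` enum still lists the template's two tasks (`anli_r1`/`acc` and `logiqa`/`acc_norm`) and that results are stored as the raw values from the JSON (the parsing code is not part of this hunk), the computation would come out as follows:

```python
# Hypothetical walk-through of the average computed in EvalResult.to_dict(),
# using the scores from eval-results/GPT-5-High.json added in this commit.
results = {"anli_r1": 0.98, "logiqa": 0.96}  # task -> score, assuming raw JSON values
num_tasks = len(results)                     # stands in for len(Tasks) under the assumption above

average = sum(v for v in results.values() if v is not None) / num_tasks
print(average)  # 0.97 -> surfaced as the "score" column after round(decimals=2) in populate.py
```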
src/populate.py CHANGED
@@ -20,8 +20,6 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[cols].round(decimals=2)

-    # filter out if any of the benchmarks have not been produced
-    df = df[has_no_nan_values(df, benchmark_cols)]
    return df

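For context on the two lines removed above: `has_no_nan_values` is defined elsewhere in the repository and is not part of this diff, so the following is only a sketch of what such a helper typically does in this leaderboard template; with the filter gone, rows with missing benchmark values now stay in the leaderboard DataFrame.

```python
import pandas as pd


def has_no_nan_values(df: pd.DataFrame, columns: list) -> pd.Series:
    # Sketch (assumption): True for rows where every listed benchmark column has a value.
    return df[columns].notna().all(axis=1)


df = pd.DataFrame({"score": [0.97, 0.55], "anli_r1": [0.98, None]})
print(df[has_no_nan_values(df, ["anli_r1"])])  # the removed filter would have kept only row 0
```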