Samoed, ekolodin98, Неизвестный Пользователь722497, and Egor committed
Commit 0bf233b · unverified · 1 Parent(s): 3c2f9df

Fix table generation for models with multiple revisions (#280)


* update results

* mv to revision_id

* change model_meta

* update revision_id

* fix meta

* update on scores

* 3b-september-2025

* change revision due to flash-attn dependency

* recalc metrics

* update script with models with multiple revisions

* fix script

* add revisions as row

* fix revision row

* add back code

---------

Co-authored-by: Kolodin Egor <eikolodin@sberbank.ru>
Co-authored-by: Неизвестный Пользователь722497 <dolegosmirnov@sberbank.ru>
Co-authored-by: Egor <31567312+ekolodin@users.noreply.github.com>

results/google__embeddinggemma-300m/64614b0b8b64f0c6c1e52b07e4e9a4e8fe4d2da2/NusaTranslationBitextMining.json CHANGED
@@ -140,4 +140,4 @@
   },
   "evaluation_time": 207.68819975852966,
   "kg_co2_emissions": null
-}
\ No newline at end of file
+}
scripts/create_pr_results_comment.py CHANGED
@@ -34,6 +34,7 @@ import pandas as pd
 from mteb.abstasks.AbsTask import AbsTask
 
 ModelName = str
+ModelRevision = str
 
 # Default reference models to compare against
 REFERENCE_MODELS: list[str] = [
@@ -64,7 +65,7 @@ def get_diff_from_main() -> list[str]:
 
 def extract_new_models_and_tasks(
     differences: list[str],
-) -> dict[ModelName, list[AbsTask]]:
+) -> dict[tuple[ModelName, ModelRevision], list[AbsTask]]:
     diffs = [repo_path / diff for diff in differences]
     result_diffs = filter(
         lambda p: p.exists() and p.suffix == ".json" and p.name != "model_meta.json",
@@ -77,7 +78,9 @@ def extract_new_models_and_tasks(
         task_name = diff.stem
 
         with model_meta.open("r") as f:
-            model_name = json.load(f)["name"]
+            model_meta = json.load(f)
+            model_name = model_meta["name"]
+            revision = model_meta["revision"]
 
         with diff.open("r") as f:
             task_result = json.load(f)
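
The hunk above starts reading the revision alongside the model name from each result folder's `model_meta.json`. A minimal sketch of that lookup, assuming only the two fields the diff actually uses (real `model_meta.json` files carry more metadata, and `read_model_key` is a hypothetical helper):

```python
import json
from pathlib import Path


def read_model_key(model_meta_path: Path) -> tuple[str, str]:
    """Return the (name, revision) pair identifying one result folder."""
    with model_meta_path.open("r") as f:
        meta = json.load(f)
    # only these two fields are consumed by the script; everything else is ignored
    return meta["name"], meta["revision"]


# layout mirrors results/<org>__<model>/<revision>/model_meta.json
# read_model_key(Path("results/google__embeddinggemma-300m/<revision>/model_meta.json"))
```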
@@ -92,13 +95,14 @@ def extract_new_models_and_tasks(
         task = mteb.get_task(
             task_name, eval_splits=list(splits), hf_subsets=list(subsets)
         )
-        models_tasks[model_name].append(task)
+        models_tasks[(model_name, revision)].append(task)
 
     return models_tasks
 
 
 def create_comparison_table(
     model: ModelName,
+    new_model_revision: str,
     tasks: list[AbsTask],
     reference_models: list[ModelName],
     models_in_pr: list[ModelName],
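
With the revision in hand, `extract_new_models_and_tasks` now buckets tasks under a `(name, revision)` tuple key, so two revisions of one model no longer collide in the dict. A toy illustration, with strings standing in for `AbsTask` objects (fabricated data):

```python
from collections import defaultdict

ModelName = str
ModelRevision = str

models_tasks: dict[tuple[ModelName, ModelRevision], list[str]] = defaultdict(list)

# two revisions of the same model land in separate buckets
models_tasks[("org/model", "rev-a")].append("NusaTranslationBitextMining")
models_tasks[("org/model", "rev-b")].append("NusaTranslationBitextMining")

assert len(models_tasks) == 2  # one entry per (name, revision) pair
```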
@@ -107,9 +111,30 @@ def create_comparison_table(
     max_col_name = "Max result"
     task_col_name = "task_name"
     results = mteb.load_results(models=models, tasks=tasks, download_latest=False)
-
-    results = results.join_revisions()
-    df = results.to_dataframe()
+    df = results.to_dataframe(include_model_revision=True)
+    new_df_columns = []
+    columns_to_merge = defaultdict(list)
+    new_model_revisions = []
+    for model_name, revision in df.columns:
+        col_with_revision = f"{model_name}__{revision}"
+        new_df_columns.append(col_with_revision)
+        if model_name != model:
+            columns_to_merge[model_name].append(col_with_revision)
+        else:
+            new_model_revisions.append(col_with_revision)
+    # if only one revision of the new model exists, there is no need to show the revision in the column name
+    if len(new_model_revisions) == 1:
+        columns_to_merge[model] = new_model_revisions
+
+    df.columns = new_df_columns
+
+    # Merge columns with the same model name by taking the maximum value
+    for model_name, cols in columns_to_merge.items():
+        if len(cols) > 1:
+            df[model_name] = df[cols].max(axis=1)
+            df.drop(columns=cols, inplace=True)
+        else:
+            df.rename(columns={cols[0]: model_name}, inplace=True)
 
     if df.empty:
         raise ValueError(f"No results found for models {models} on tasks {tasks}")
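
`create_comparison_table` then flattens the `(model, revision)` column index into `model__revision` names and collapses reference models that appear with several revisions by taking the row-wise maximum. A self-contained sketch of that merge on fabricated scores:

```python
from collections import defaultdict

import pandas as pd

# fabricated scores; tuple keys mimic to_dataframe(include_model_revision=True)
df = pd.DataFrame(
    {("ref-model", "rev1"): [0.51, 0.60], ("ref-model", "rev2"): [0.55, 0.58]},
    index=["TaskA", "TaskB"],
)

# flatten the MultiIndex into model__revision column names
df.columns = [f"{name}__{rev}" for name, rev in df.columns]

columns_to_merge = defaultdict(list)
for col in df.columns:
    columns_to_merge[col.split("__")[0]].append(col)

# a model with several revisions keeps only its best score per task
for model_name, cols in columns_to_merge.items():
    if len(cols) > 1:
        df[model_name] = df[cols].max(axis=1)
        df = df.drop(columns=cols)

print(df)  # ref-model: TaskA -> 0.55, TaskB -> 0.60
```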
@@ -126,20 +151,27 @@ def create_comparison_table(
     max_dataframe = task_results_df.groupby(task_col_name).max()
     high_model_performance_tasks = []
 
+    model_select_column = model if model in df.columns else f"{model}__{new_model_revision}"
     if not max_dataframe.empty:
         for task_name, row in max_dataframe.iterrows():
             df.loc[df[task_col_name] == task_name, max_col_name] = row["score"]
-            model_score = df.loc[df[task_col_name] == task_name, model].values[0]
+            model_score = df.loc[df[task_col_name] == task_name, model_select_column].values[0]
             if model_score > row["score"]:
                 high_model_performance_tasks.append(task_name)
 
     averages: dict[str, float | None] = {}
+    index_columns = defaultdict(list)
+    # group columns under their base model name (revision-suffixed columns, if any)
+    for col in df.columns:
+        index_columns[col.split("__")[0]].append(col)
     for col in models + [max_col_name]:
-        if col not in df.columns:
+        available_columns = index_columns.get(col)
+        if available_columns is None:
             continue
-        numeric = pd.to_numeric(df[col], errors="coerce")
-        avg = numeric.mean()
-        averages[col] = avg if not pd.isna(avg) else None
+        for cur_col in available_columns:
+            numeric = pd.to_numeric(df[cur_col], errors="coerce")
+            avg = numeric.mean()
+            averages[cur_col] = avg if not pd.isna(avg) else None
 
     avg_row = pd.DataFrame(
         {
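
The averages loop coerces every score column to numeric so stray non-numeric cells become NaN instead of raising, and a column that is entirely NaN averages to None. A quick illustration on fabricated values:

```python
import pandas as pd

df = pd.DataFrame({"model-a": [0.4, "n/a", 0.6], "model-b": [None, None, None]})

averages: dict[str, float | None] = {}
for col in df.columns:
    numeric = pd.to_numeric(df[col], errors="coerce")  # "n/a" -> NaN
    avg = numeric.mean()  # NaN cells are skipped
    averages[col] = avg if not pd.isna(avg) else None

print(averages)  # {'model-a': 0.5, 'model-b': None}
```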
@@ -171,6 +203,31 @@ def highlight_max_bold(
     if pd.notna(row[max_col]):
         result_df.at[idx, max_col] = f"**{result_df.at[idx, max_col]}**"
 
+    # add a revisions row if at least one column carries a revision suffix
+    revisions = []
+    new_df_columns = []
+    at_least_one_revision = False
+    for col in result_df.columns:
+        if "__" in col:
+            at_least_one_revision = True
+            model_name, revision = col.split("__")
+            revisions.append(revision)
+            new_df_columns.append(model_name)
+        elif col == "task_name":
+            revisions.append("**Revisions**")
+            new_df_columns.append(col)
+        else:
+            revisions.append("")
+            new_df_columns.append(col)
+
+    if at_least_one_revision:
+        # insert the row with revisions right after the header
+        revisions_row = pd.DataFrame(
+            {col: [rev] for col, rev in zip(result_df.columns, revisions)}
+        )
+        result_df = pd.concat([revisions_row, result_df], ignore_index=True).reset_index(drop=True)
+        result_df.columns = new_df_columns
+
     return result_df
 
 
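
A markdown table has exactly one header row, so `highlight_max_bold` now strips the revision suffix from the column names and re-emits it as the first body row. A sketch with a placeholder revision, printing roughly what ends up in the PR comment (`to_markdown` requires the tabulate package):

```python
import pandas as pd

result_df = pd.DataFrame(
    {"task_name": ["TaskA"], "new-model__abc123": [0.61], "Max result": [0.58]}
)

revisions, new_df_columns = [], []
for col in result_df.columns:
    if "__" in col:
        model_name, revision = col.split("__")
        revisions.append(revision)
        new_df_columns.append(model_name)
    elif col == "task_name":
        revisions.append("**Revisions**")
        new_df_columns.append(col)
    else:
        revisions.append("")
        new_df_columns.append(col)

revisions_row = pd.DataFrame({col: [rev] for col, rev in zip(result_df.columns, revisions)})
result_df = pd.concat([revisions_row, result_df], ignore_index=True)
result_df.columns = new_df_columns
print(result_df.to_markdown(index=False))
# roughly:
# | task_name     | new-model   | Max result   |
# |:--------------|:------------|:-------------|
# | **Revisions** | abc123      |              |
# | TaskA         | 0.61        | 0.58         |
```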
@@ -183,7 +240,7 @@ def generate_markdown_content(
     all_tasks = sorted(
         {t.metadata.name for tasks in model_tasks.values() for t in tasks}
     )
-    new_models = list(model_tasks.keys())
+    new_models = [model_name for model_name, revision in model_tasks.keys()]
 
     parts: list[str] = [
         "# Model Results Comparison",
@@ -194,11 +251,11 @@ def generate_markdown_content(
         "",
     ]
 
-    for model_name, tasks in model_tasks.items():
+    for (model_name, revision), tasks in model_tasks.items():
         parts.append(f"## Results for `{model_name}`")
 
         df, high_model_performance_tasks = create_comparison_table(
-            model_name, tasks, reference_models, list(model_tasks.keys())
+            model_name, revision, tasks, reference_models, new_models
         )
         bold_df = highlight_max_bold(df)
         parts.append(bold_df.to_markdown(index=False))