Ayush6 Samoed committed
Commit 9d1796b · unverified · 1 Parent(s): fb15ff7

Add training datasets column to table with results (#380)

* Add training datasets column to table with results

* fix column naming

* fix logic

* change results for testing

* fix error

* revise logic

* convert to boolean

* fix training dataset content

* address comments

* remove customization

* fix error

* Update scripts/create_pr_results_comment.py

---------

Co-authored-by: Roman Solomatin <samoed.roman@gmail.com>

results/google__gemini-embedding-001/1/AILACasedocs.json CHANGED

```diff
@@ -155,4 +155,5 @@
   },
   "evaluation_time": 70.23574590682983,
   "kg_co2_emissions": null
+
 }
```

(A whitespace-only change: per the "change results for testing" commit message, it exists to exercise the new results-comment behavior on this PR.)
scripts/create_pr_results_comment.py CHANGED
```diff
@@ -106,11 +106,13 @@ def create_comparison_table(
     tasks: list[AbsTask],
     reference_models: list[ModelName],
     models_in_pr: list[ModelName],
-) -> tuple[pd.DataFrame, list[str]]:
+) -> tuple[pd.DataFrame, list[str], set[str]]:
     models = [model] + reference_models
     max_col_name = "Max result"
     max_model_col_name = "Model with max result"
     task_col_name = "task_name"
+    in_training_col_name = "In Training Data"
+
     results = cache.load_results(models=models, tasks=tasks)
     df = results.to_dataframe(include_model_revision=True)
     new_df_columns = []
@@ -142,6 +144,7 @@ def create_comparison_table(
 
     df[max_col_name] = None
     df[max_model_col_name] = ''
+    df[in_training_col_name] = False
     task_results = cache.load_results(tasks=tasks)
     task_results = task_results.join_revisions()
 
```
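The new boolean column defaults to False, so tasks are only flagged once a training-data match is found in the later hunk. A minimal sketch of the setup, using a made-up toy frame:

```python
import pandas as pd

# Toy stand-in for the comparison table the script builds.
df = pd.DataFrame({"task_name": ["AILACasedocs", "SciFact"]})

in_training_col_name = "In Training Data"
df[in_training_col_name] = False  # default: assume the task was not trained on
```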
```diff
@@ -150,12 +153,22 @@
     task_results_df.loc[task_results_df["score"] > 1, "score"] /= 100
     # remove results of models in this pr from max score calculation
     task_results_df = task_results_df[~task_results_df["model_name"].isin(models_in_pr)]
+
+    model_meta = mteb.get_model_meta(model)
+    all_training_datasets: set[str] = model_meta.get_training_datasets()
+
+    if all_training_datasets:
+        df.loc[
+            df[task_col_name].isin(all_training_datasets),
+            in_training_col_name
+        ] = True
+
     max_dataframe = task_results_df.sort_values(
         "score", ascending=False
     ).drop_duplicates(subset=task_col_name, keep="first")
     high_model_performance_tasks = []
 
-    model_select_colum = (
+    model_select_column = (
         model if model in df.columns else f"{model}__{new_model_revision}"
     )
     if not max_dataframe.empty:
```
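This hunk carries the core logic: look up the model's metadata, collect the datasets it reports training on, and flag every matching task row. A self-contained sketch of the same pattern, using a made-up model name and toy scores (`get_model_meta` and `get_training_datasets` are the calls the diff itself relies on):

```python
import mteb
import pandas as pd

df = pd.DataFrame(
    {"task_name": ["AILACasedocs", "SciFact"], "score": [0.41, 0.73]}
)
df["In Training Data"] = False

# Fetch the model's metadata and the datasets it was trained on.
meta = mteb.get_model_meta("intfloat/e5-small")  # illustrative model name
training_datasets: set[str] = meta.get_training_datasets()

# Mark every task whose name appears among the training datasets.
if training_datasets:
    df.loc[df["task_name"].isin(training_datasets), "In Training Data"] = True
```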
```diff
@@ -166,7 +179,7 @@
             "model_name"
         ]
         model_score = df.loc[
-            df[task_col_name] == task_name, model_select_colum
+            df[task_col_name] == task_name, model_select_column
         ].values[0]
         if model_score > row["score"]:
             high_model_performance_tasks.append(task_name)
@@ -175,7 +188,8 @@
     index_columns = defaultdict(list)
     # models with revisions if exists
     for col in df.columns:
-        index_columns[col.split("__")[0]].append(col)
+        if col != in_training_col_name:
+            index_columns[col.split("__")[0]].append(col)
     for col in models + [max_col_name]:
         available_columns = index_columns.get(col)
         if available_columns is None:
```
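Result columns are named either `model` or `model__revision`, so splitting on `__` and keeping the first part groups all revisions of a model under one key; the new flag column has to be skipped, otherwise `In Training Data` would be mistaken for a model name. A quick illustration with made-up column names:

```python
from collections import defaultdict

columns = ["task_name", "my-model__rev1", "my-model__rev2", "In Training Data"]

index_columns = defaultdict(list)
for col in columns:
    if col != "In Training Data":
        index_columns[col.split("__")[0]].append(col)

print(dict(index_columns))
# {'task_name': ['task_name'], 'my-model': ['my-model__rev1', 'my-model__rev2']}
```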
```diff
@@ -188,18 +202,20 @@
     avg_row = pd.DataFrame(
         {
             task_col_name: ["**Average**"],
+            in_training_col_name: ["-"],
             **{col: [val] for col, val in averages.items()},
         }
     )
-    return pd.concat([df, avg_row], ignore_index=True), high_model_performance_tasks
+    return pd.concat([df, avg_row], ignore_index=True), high_model_performance_tasks, all_training_datasets
 
 
 def highlight_max_bold(
     df: pd.DataFrame, exclude_cols: list[str] = ["task_name"]
 ) -> pd.DataFrame:
     result_df = df.copy()
+
     for col in result_df.columns:
-        if col not in exclude_cols:
+        if col not in exclude_cols and col != "In Training Data":
             result_df[col] = result_df[col].apply(
                 lambda x: f"{x:.4f}"
                 if isinstance(x, (int, float)) and pd.notna(x)
```
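The extra `col != "In Training Data"` guard matters because `bool` is a subclass of `int` in Python: without it, the flags would pass the `isinstance(x, (int, float))` check and render as `1.0000`/`0.0000` in the table. A two-line demonstration:

```python
print(isinstance(True, (int, float)))  # True (bool is a subclass of int)
print(f"{True:.4f}")                   # "1.0000", which would garble the flag column
```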
```diff
@@ -268,7 +284,7 @@ def generate_markdown_content(
     for (model_name, revision), tasks in model_tasks.items():
         parts.append(f"## Results for `{model_name}`")
 
-        df, high_model_performance_tasks = create_comparison_table(
+        df, high_model_performance_tasks, all_training_datasets = create_comparison_table(
             model_name, revision, tasks, reference_models, new_models
         )
         bold_df = highlight_max_bold(df)
@@ -283,6 +299,11 @@
                 "",
             ]
         )
+
+        if all_training_datasets:
+            datasets_list = ", ".join(f"`{d}`" for d in sorted(all_training_datasets))
+            parts.append(f"**Training datasets:** {datasets_list}")
+            parts.extend(["", ""])
 
         parts.extend(["", "---", ""])
 
```
309