Samoed committed
Commit cff1581 · unverified · 1 parent: 4918ca7

Add max model name to table with results (#367)


* change result

* add name of max model

pyproject.toml CHANGED
@@ -3,10 +3,16 @@ name = "results"
 version = "0.1.0"
 description = "The result repository for mteb"
 readme = "README.md"
-requires-python = ">=3.10,<3.13" # pytrec-eval-terrier does not compile for 3.13
-dependencies = ["mteb>=1.13.0"]
+requires-python = ">=3.10"
+dependencies = ["mteb>=2.0"]
 
 [dependency-groups]
 dev = [
     "pytest>=8.3.4",
 ]
+lint = [
+    "ruff>=0.14.9",
+]
+pr-comment = [
+    "tabulate>=0.9.0",
+]
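The new pr-comment dependency group adds tabulate, presumably so that scripts/create_pr_results_comment.py can render the comparison table as a GitHub-flavoured Markdown table in the PR comment (either directly or via pandas' DataFrame.to_markdown, which delegates to tabulate). A minimal sketch of that rendering step, with a hypothetical model column and invented scores purely for illustration:

import pandas as pd
from tabulate import tabulate

# Hypothetical comparison table: one row per task, one column per model,
# plus the "Max result" / "Model with max result" columns added in this commit.
# "my-new-model" and all scores are invented for this sketch.
df = pd.DataFrame(
    {
        "task_name": ["HeadlineClassification"],
        "my-new-model": [0.81],
        "Max result": [0.86],
        "Model with max result": ["ai-forever/FRIDA"],
    }
)

# Render as a GitHub-flavoured Markdown table for the PR comment body.
print(tabulate(df, headers="keys", tablefmt="github", showindex=False))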
results/ai-forever__FRIDA/7292217af9a9e6dbf07048f76b434ad1e2aa8b76/HeadlineClassification.json CHANGED
@@ -70,4 +70,4 @@
   },
   "evaluation_time": 17.08301067352295,
   "kg_co2_emissions": null
-}
+}
scripts/create_pr_results_comment.py CHANGED
@@ -109,6 +109,7 @@ def create_comparison_table(
 ) -> tuple[pd.DataFrame, list[str]]:
     models = [model] + reference_models
     max_col_name = "Max result"
+    max_model_col_name = "Model with max result"
     task_col_name = "task_name"
     results = cache.load_results(models=models, tasks=tasks)
     df = results.to_dataframe(include_model_revision=True)
@@ -140,6 +141,7 @@
         raise ValueError(f"No results found for models {models} on tasks {tasks}")
 
     df[max_col_name] = None
+    df[max_model_col_name] = ''
     task_results = cache.load_results(tasks=tasks)
     task_results = task_results.join_revisions()
 
@@ -148,14 +150,24 @@
     task_results_df.loc[task_results_df["score"] > 1, "score"] /= 100
     # remove results of models in this pr from max score calculation
     task_results_df = task_results_df[~task_results_df["model_name"].isin(models_in_pr)]
-    max_dataframe = task_results_df.groupby(task_col_name).max()
+    max_dataframe = task_results_df.sort_values(
+        "score", ascending=False
+    ).drop_duplicates(subset=task_col_name, keep="first")
     high_model_performance_tasks = []
 
-    model_select_colum = model if model in df.columns else f"{model}__{new_model_revision}"
+    model_select_colum = (
+        model if model in df.columns else f"{model}__{new_model_revision}"
+    )
     if not max_dataframe.empty:
-        for task_name, row in max_dataframe.iterrows():
+        for _, row in max_dataframe.iterrows():
+            task_name = row["task_name"]
             df.loc[df[task_col_name] == task_name, max_col_name] = row["score"]
-            model_score = df.loc[df[task_col_name] == task_name, model_select_colum].values[0]
+            df.loc[df[task_col_name] == task_name, max_model_col_name] = row[
+                "model_name"
+            ]
+            model_score = df.loc[
+                df[task_col_name] == task_name, model_select_colum
+            ].values[0]
             if model_score > row["score"]:
                 high_model_performance_tasks.append(task_name)
 
@@ -225,7 +237,9 @@ def highlight_max_bold(
     revisions_row = pd.DataFrame(
         {col: [rev] for col, rev in zip(result_df.columns, revisions)}
    )
-    result_df = pd.concat([revisions_row, result_df], ignore_index=True).reset_index(drop=True)
+    result_df = pd.concat(
+        [revisions_row, result_df], ignore_index=True
+    ).reset_index(drop=True)
     result_df.columns = new_df_columns
 
     return result_df
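The core of the change is replacing groupby(task_col_name).max() with sort_values(...).drop_duplicates(...). groupby().max() takes the maximum of each column independently, so the model_name it returns is merely the alphabetically largest name, not the model that produced the best score; sorting by score and keeping the first row per task preserves the whole winning row, which is what makes the new "Model with max result" column reliable. A self-contained sketch with invented scores illustrating the difference:

import pandas as pd

# Toy per-task results; the scores are invented for illustration only.
task_results_df = pd.DataFrame(
    {
        "task_name": ["HeadlineClassification", "HeadlineClassification"],
        "model_name": ["ai-forever/FRIDA", "zzz-other-model"],
        "score": [0.86, 0.74],
    }
)

# Old approach: column-wise max per task. "model_name" is maximised
# alphabetically, so it can name a model other than the top scorer
# ("zzz-other-model" here, even though FRIDA has the higher score).
wrong = task_results_df.groupby("task_name").max()

# New approach: sort by score and keep the best row per task, so
# "score" and "model_name" stay aligned on the same row.
max_dataframe = task_results_df.sort_values(
    "score", ascending=False
).drop_duplicates(subset="task_name", keep="first")

print(wrong[["model_name", "score"]])
print(max_dataframe[["task_name", "model_name", "score"]])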