add high tasks (#272)

This change makes `create_comparison_table` also report the tasks on which the PR's model beats the best reference-model score, and appends a note listing those tasks to the generated PR comment.
scripts/create_pr_results_comment.py (changed)
```diff
@@ -89,7 +89,9 @@ def extract_new_models_and_tasks(
         for subset_result in split_results:
             subsets.add(subset_result["hf_subset"])
 
-        task = mteb.get_task(
+        task = mteb.get_task(
+            task_name, eval_splits=list(splits), hf_subsets=list(subsets)
+        )
         models_tasks[model_name].append(task)
 
     return models_tasks
```
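For reference, `mteb.get_task` accepts `eval_splits` and `hf_subsets` (the kwargs used in this hunk) to narrow a task to specific evaluation splits and subsets; the script uses this to rebuild each task from exactly the splits and `hf_subset` values found in the uploaded results. A minimal sketch, assuming a recent `mteb` version and an illustrative task name:

```python
import mteb

# Splits/subsets as the script would collect them from the result files.
splits = {"test"}
subsets = {"en"}

# "STS12" is an illustrative MTEB task name, not one taken from this PR.
task = mteb.get_task(
    "STS12",
    eval_splits=list(splits),
    hf_subsets=list(subsets),
)
print(task.metadata.name)
```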
```diff
@@ -100,7 +102,7 @@ def create_comparison_table(
     tasks: list[AbsTask],
     reference_models: list[ModelName],
     models_in_pr: list[ModelName],
-) -> pd.DataFrame:
+) -> tuple[pd.DataFrame, list[str]]:
     models = [model] + reference_models
     max_col_name = "Max result"
     task_col_name = "task_name"
```
```diff
@@ -122,9 +124,14 @@ def create_comparison_table(
     # remove results of models in this pr from max score calculation
     task_results_df = task_results_df[~task_results_df["model_name"].isin(models_in_pr)]
     max_dataframe = task_results_df.groupby(task_col_name).max()
+    high_model_performance_tasks = []
+
     if not max_dataframe.empty:
         for task_name, row in max_dataframe.iterrows():
             df.loc[df[task_col_name] == task_name, max_col_name] = row["score"]
+            model_score = df.loc[df[task_col_name] == task_name, model].values[0]
+            if model_score > row["score"]:
+                high_model_performance_tasks.append(task_name)
 
     averages: dict[str, float | None] = {}
     for col in models + [max_col_name]:
```
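The flagging logic added here is plain pandas: take the per-task maximum over the reference models, then compare the PR model's score against it via a boolean `.loc` lookup. A self-contained sketch with invented scores (model and task names are illustrative, not from this PR):

```python
import pandas as pd

# Reference results, already filtered so models from this PR are excluded.
task_results_df = pd.DataFrame(
    {
        "task_name": ["TaskA", "TaskA", "TaskB", "TaskB"],
        "model_name": ["ref-1", "ref-2", "ref-1", "ref-2"],
        "score": [0.71, 0.68, 0.60, 0.55],
    }
)
# Best reference score per task ("score" selected so max() stays numeric).
max_dataframe = task_results_df.groupby("task_name")[["score"]].max()

# Comparison table with the PR model's scores, one row per task.
df = pd.DataFrame({"task_name": ["TaskA", "TaskB"], "pr-model": [0.75, 0.58]})

high_model_performance_tasks = []
for task_name, row in max_dataframe.iterrows():
    model_score = df.loc[df["task_name"] == task_name, "pr-model"].values[0]
    if model_score > row["score"]:
        high_model_performance_tasks.append(task_name)

print(high_model_performance_tasks)  # ['TaskA']
```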
```diff
@@ -140,7 +147,7 @@ def create_comparison_table(
             **{col: [val] for col, val in averages.items()},
         }
     )
-    return pd.concat([df, avg_row], ignore_index=True)
+    return pd.concat([df, avg_row], ignore_index=True), high_model_performance_tasks
 
 
 def highlight_max_bold(
```
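Because the function now returns a two-element tuple instead of a bare `DataFrame`, every caller has to unpack both values, as the call-site hunk below does. A minimal sketch of the pattern, with names taken from the signature in this diff:

```python
# Sketch only: unpack the comparison table and the flagged task names together.
df, high_model_performance_tasks = create_comparison_table(
    model_name, tasks, reference_models, models_in_pr
)
```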
```diff
@@ -190,12 +197,22 @@ def generate_markdown_content(
     for model_name, tasks in model_tasks.items():
         parts.append(f"## Results for `{model_name}`")
 
-        df = create_comparison_table(
+        df, high_model_performance_tasks = create_comparison_table(
             model_name, tasks, reference_models, list(model_tasks.keys())
         )
         bold_df = highlight_max_bold(df)
         parts.append(bold_df.to_markdown(index=False))
 
+        if len(high_model_performance_tasks) > 0:
+            parts.extend(
+                [
+                    "",
+                    "Model have high performance on these tasks: "
+                    + ",".join([f"`{task}`" for task in high_model_performance_tasks]),
+                    "",
+                ]
+            )
+
         parts.extend(["", "---", ""])
 
     return "\n".join(parts)
```
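To see what the new branch emits, here is the added snippet run in isolation with a hypothetical task list (the wording, including "Model have high performance", is copied verbatim from the patch):

```python
high_model_performance_tasks = ["TaskA", "TaskB"]  # hypothetical task names

parts = []
if len(high_model_performance_tasks) > 0:
    parts.extend(
        [
            "",
            "Model have high performance on these tasks: "
            + ",".join([f"`{task}`" for task in high_model_performance_tasks]),
            "",
        ]
    )
print("\n".join(parts))
# Output (a blank line, the note, a blank line):
#
# Model have high performance on these tasks: `TaskA`,`TaskB`
#
```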