don't add model results to max result (#252)

* don't add model results to max result
* remove model results from the same PR
* use scores only from the same splits and subsets
scripts/create_pr_results_comment.py
CHANGED
@@ -23,16 +23,17 @@ from __future__ import annotations
 
 import argparse
 import json
+import logging
 import os
 import subprocess
-import logging
 from collections import defaultdict
 from pathlib import Path
 
 import mteb
 import pandas as pd
+from mteb.abstasks.AbsTask import AbsTask
 
-
+ModelName = str
 
 # Default reference models to compare against
 REFERENCE_MODELS: list[str] = [
@@ -63,14 +64,14 @@ def get_diff_from_main() -> list[str]:
 
 def extract_new_models_and_tasks(
     differences: list[str],
-) -> dict[ModelName, list[
+) -> dict[ModelName, list[AbsTask]]:
     diffs = [repo_path / diff for diff in differences]
     result_diffs = filter(
         lambda p: p.exists() and p.suffix == ".json" and p.name != "model_meta.json",
         diffs,
     )
 
-
+    models_tasks = defaultdict(list)
     for diff in result_diffs:
         model_meta = diff.parent / "model_meta.json"
         task_name = diff.stem
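The return value changes from task names to loaded mteb task objects, accumulated per model in a `defaultdict(list)` so keys never need explicit initialization. A minimal sketch of that accumulation pattern (model and task names here are hypothetical):

```python
from collections import defaultdict

# Missing keys start as empty lists, so results can be appended directly.
models_tasks = defaultdict(list)
for model_name, task_name in [
    ("my-model", "Banking77Classification"),
    ("my-model", "STS12"),
]:
    models_tasks[model_name].append(task_name)

print(dict(models_tasks))
# {'my-model': ['Banking77Classification', 'STS12']}
```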
@@ -78,13 +79,27 @@ def extract_new_models_and_tasks(
         with model_meta.open("r") as f:
             model_name = json.load(f)["name"]
 
-
+        with diff.open("r") as f:
+            task_result = json.load(f)
+
+        splits = set()
+        subsets = set()
+        for split_name, split_results in task_result.get("scores", {}).items():
+            splits.add(split_name)
+            for subset_result in split_results:
+                subsets.add(subset_result["hf_subset"])
 
-
+        task = mteb.get_task(task_name, eval_splits=list(splits), hf_subsets=list(subsets))
+        models_tasks[model_name].append(task)
+
+    return models_tasks
 
 
 def create_comparison_table(
-    model:
+    model: ModelName,
+    tasks: list[AbsTask],
+    reference_models: list[ModelName],
+    models_in_pr: list[ModelName],
 ) -> pd.DataFrame:
     models = [model] + reference_models
     max_col_name = "Max result"
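This block implements the "use scores only from the same splits and subsets" bullet: rather than loading each task with its default evaluation splits, the script now reads the splits and `hf_subset` values actually present in the uploaded result file and passes them to `mteb.get_task`. A self-contained sketch of the collection step, using a hypothetical result payload whose schema is inferred from the parsing code above:

```python
import json

# Assumed schema: "scores" maps each split to a list of per-subset
# entries, each carrying an "hf_subset" key.
task_result = json.loads("""
{
  "scores": {
    "test": [
      {"hf_subset": "default", "main_score": 0.71},
      {"hf_subset": "en", "main_score": 0.69}
    ],
    "validation": [
      {"hf_subset": "default", "main_score": 0.73}
    ]
  }
}
""")

splits: set[str] = set()
subsets: set[str] = set()
for split_name, split_results in task_result.get("scores", {}).items():
    splits.add(split_name)
    for subset_result in split_results:
        subsets.add(subset_result["hf_subset"])

print(sorted(splits))   # ['test', 'validation']
print(sorted(subsets))  # ['default', 'en']
```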
@@ -104,6 +119,8 @@ def create_comparison_table(
         task_results_df = task_results.to_dataframe(format="long")
         # some scores are in percentage, convert them to decimal
         task_results_df.loc[task_results_df["score"] > 1, "score"] /= 100
+        # remove results of models in this pr from max score calculation
+        task_results_df = task_results_df[~task_results_df["model_name"].isin(models_in_pr)]
         max_dataframe = task_results_df.groupby(task_col_name).max()
         if not max_dataframe.empty:
             for task_name, row in max_dataframe.iterrows():
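This filter is the headline fix: without it, the new model's own scores feed into the "Max result" column it is being compared against, so a genuinely new best score would only ever appear to tie the maximum. A minimal pandas sketch of the effect (model and task names are illustrative):

```python
import pandas as pd

# Long-format scores, shaped like to_dataframe(format="long") output.
task_results_df = pd.DataFrame(
    {
        "task_name": ["STS12", "STS12", "STS12"],
        "model_name": ["ref-model-a", "ref-model-b", "pr-model"],
        "score": [0.70, 0.75, 0.99],
    }
)
models_in_pr = ["pr-model"]

# Without the filter the max would be the PR model's own 0.99;
# with it, "Max result" reflects only previously known models.
filtered = task_results_df[~task_results_df["model_name"].isin(models_in_pr)]
print(filtered.groupby("task_name")["score"].max())  # STS12 -> 0.75
```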
@@ -133,7 +150,7 @@ def highlight_max_bold(
     for col in result_df.columns:
         if col not in exclude_cols:
             result_df[col] = result_df[col].apply(
-                lambda x: f"{x:.
+                lambda x: f"{x:.4f}"
                 if isinstance(x, (int, float)) and pd.notna(x)
                 else x
             )
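The new lambda pins score formatting at four decimal places, while the isinstance/notna guard leaves non-numeric cells (such as placeholders for missing results) untouched, e.g.:

```python
import pandas as pd

cells = pd.Series([0.71234567, "n/a", None])
print(list(cells.apply(
    lambda x: f"{x:.4f}" if isinstance(x, (int, float)) and pd.notna(x) else x
)))
# ['0.7123', 'n/a', None]
```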
@@ -151,12 +168,14 @@ def highlight_max_bold(
 
 
 def generate_markdown_content(
-    model_tasks: dict[
+    model_tasks: dict[ModelName, list[AbsTask]], reference_models: list[str]
 ) -> str:
     if not model_tasks:
         return "# Model Results Comparison\n\nNo new model results found in this PR."
 
-    all_tasks = sorted(
+    all_tasks = sorted(
+        {t.metadata.name for tasks in model_tasks.values() for t in tasks}
+    )
     new_models = list(model_tasks.keys())
 
     parts: list[str] = [
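Because `model_tasks` now holds task objects rather than strings, the task list is rebuilt from `t.metadata.name`; routing through a set dedupes tasks shared by several models before sorting. A sketch with a stand-in for the task objects (the `SimpleNamespace` shim is only for illustration):

```python
from types import SimpleNamespace

def task_stub(name: str) -> SimpleNamespace:
    # Mimics the .metadata.name attribute chain of an mteb task.
    return SimpleNamespace(metadata=SimpleNamespace(name=name))

model_tasks = {
    "model-a": [task_stub("STS12"), task_stub("Banking77Classification")],
    "model-b": [task_stub("STS12")],
}

all_tasks = sorted({t.metadata.name for tasks in model_tasks.values() for t in tasks})
print(all_tasks)  # ['Banking77Classification', 'STS12']
```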
@@ -171,7 +190,9 @@ def generate_markdown_content(
     for model_name, tasks in model_tasks.items():
         parts.append(f"## Results for `{model_name}`")
 
-        df = create_comparison_table(
+        df = create_comparison_table(
+            model_name, tasks, reference_models, list(model_tasks.keys())
+        )
         bold_df = highlight_max_bold(df)
         parts.append(bold_df.to_markdown(index=False))
 
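Note the fourth argument: `list(model_tasks.keys())` passes every model added in this PR, not just the one whose table is currently being rendered, so when a PR uploads results for several models none of them can inflate another's "Max result" column.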