Kenneth Enevoldsen committed
Commit bbc4e83 · unverified · 1 Parent(s): 4004120

Add script for computing results table for PRs (#210)


This adds the script for computing the PR results table comment.

In another PR we could turn this into an action like `@gitbot compare intfloat/multilingual-e5-large myorg/my-new-model`, but this is a good start.
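
As described in the script's docstring, it is run from a checked-out PR branch:

`gh pr checkout {pr-number}`
`python scripts/create_pr_results_comment.py [--models MODEL1 MODEL2 ...]`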

Files changed (1)
1. scripts/create_pr_results_comment.py +156 -0
scripts/create_pr_results_comment.py ADDED
"""
Script to generate a Markdown comparison table for new model results in a pull request.

Usage:
    gh pr checkout {pr-number}
    python scripts/create_pr_results_comment.py [--models MODEL1 MODEL2 ...]

Description:
    - Compares new model results (added in the current PR) against reference models.
    - Outputs a Markdown table with results for each new model and highlights the best scores.
    - By default, compares against: intfloat/multilingual-e5-large and google/gemini-embedding-001.
    - You can specify reference models with the --models argument.

Arguments:
    --models: List of reference models to compare against (default: intfloat/multilingual-e5-large google/gemini-embedding-001)

Example:
    python scripts/create_pr_results_comment.py --models intfloat/multilingual-e5-large myorg/my-new-model
"""

from __future__ import annotations

import argparse
import json
import os
import subprocess
from collections import defaultdict
from pathlib import Path

import mteb
import pandas as pd

TaskName, ModelName = str, str


repo_path = Path(__file__).parents[1]
results_path = repo_path / "results"

os.environ["MTEB_CACHE"] = str(repo_path.parent)


default_reference_models = [
    "intfloat/multilingual-e5-large",
    "google/gemini-embedding-001",
]


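# Note: MTEB_CACHE is pointed at the parent directory of this repository, presumably so
# that mteb.load_results() below picks up the local results checkout rather than
# downloading it.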
def get_diff_from_main() -> list[str]:
    current_rev, origin_rev = subprocess.run(
        ["git", "rev-parse", "main", "origin/main"],
        cwd=repo_path,
        capture_output=True,
        check=True,
        text=True,
    ).stdout.splitlines()

    if current_rev != origin_rev:
        raise ValueError(
            "Your main branch is not up-to-date, please run `git fetch origin main`"
        )

    differences = subprocess.run(
        ["git", "diff", "--name-only", "origin/main...HEAD"],
        cwd=repo_path,
        text=True,
        capture_output=True,
    ).stdout.splitlines()

    return differences


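# The paths returned by get_diff_from_main() are relative to the repository root; for a
# new result file they are assumed here to look like
# "results/<model>/<revision>/<TaskName>.json", with a model_meta.json sibling that
# extract_new_models_and_tasks reads to recover the model name.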
def extract_new_models_and_tasks(
    differences: list[str],
) -> dict[ModelName, list[TaskName]]:
    diffs = [repo_path / diff for diff in differences]
    result_diffs = filter(
        lambda p: p.exists() and p.suffix == ".json" and p.name != "model_meta.json",
        diffs,
    )

    models = defaultdict(list)
    for diff in result_diffs:
        model_meta = diff.parent / "model_meta.json"
        task_name = diff.stem

        with model_meta.open("r") as f:
            model_name = json.load(f)["name"]

        models[model_name].append(task_name)

    return models


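# The mapping built above might look like {"myorg/my-new-model": ["TaskA", "TaskB"]}
# (hypothetical names): one entry per newly added model, with task names taken from the
# stems of the added result JSON files.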
def create_comparison_table(models: list[str], tasks: list[str]) -> pd.DataFrame:
    results = mteb.load_results(models=models, tasks=tasks, download_latest=False)
    results = results.join_revisions()
    df = results.to_dataframe()

    # compute the average per model column
    model_names = [c for c in df.columns if c != "task_name"]

    row = pd.DataFrame(
        {
            "task_name": ["**Average**"],
            **{
                model: df[model].mean() if model != "task_name" else None
                for model in model_names
            },
        }
    )
    df = pd.concat([df, row], ignore_index=True)
    return df


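# The frame returned by create_comparison_table has a "task_name" column plus one score
# column per model, ending with the appended "**Average**" row; highlight_max_bold below
# formats the scores to two decimals and bolds the per-row maximum for the Markdown output.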
def highlight_max_bold(df, exclude_cols=["task_name"]):
    # format float scores to two decimal places (string columns such as task_name are unaffected)
    result_df = df.copy()
    result_df = result_df.applymap(lambda x: f"{x:.2f}" if isinstance(x, float) else x)
    tmp_df = df.copy()
    tmp_df = tmp_df.drop(columns=exclude_cols)
    for idx in df.index:
        max_col = tmp_df.loc[idx].idxmax()
        result_df.loc[idx, max_col] = f"**{result_df.loc[idx, max_col]}**"

    return result_df


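# Formatting example (illustrative values only): a row with scores 0.548 and 0.612 is
# rendered as "0.55" and "**0.61**"; the same rule applies to the "**Average**" row.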
def create_argparse() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        description="Create PR comment with results comparison."
    )
    parser.add_argument(
        "--models",
        nargs="+",
        default=default_reference_models,
        help="List of reference models to compare against (default: %(default)s)",
    )
    return parser


def main(reference_models: list[str]):
    diff = get_diff_from_main()
    new_additions = extract_new_models_and_tasks(diff)

    for model, tasks in new_additions.items():
        print(f"**Results for `{model}`**")
        df = create_comparison_table(models=reference_models + [model], tasks=tasks)
        bold_df = highlight_max_bold(df)
        print(bold_df.to_markdown(index=False))


if __name__ == "__main__":
    parser = create_argparse()
    args = parser.parse_args()
    main(reference_models=args.models)
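
For a quick local check, the table helpers can also be called directly from Python. A minimal sketch, assuming it is run from the scripts/ directory and that the model and task names (placeholders below) exist in the local results folder:

# Placeholder model/task names; replace with entries present in the results checkout.
from create_pr_results_comment import create_comparison_table, highlight_max_bold

df = create_comparison_table(
    models=["intfloat/multilingual-e5-large", "myorg/my-new-model"],
    tasks=["SomeTaskName"],
)
print(highlight_max_bold(df).to_markdown(index=False))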