Ayush6 Roman Solomatin KennethEnevoldsen committed on
Commit
141ba02
·
unverified ·
1 Parent(s): 9ba55b2

Create Results PR Comment for Results diff (#444)

Browse files

* Create Results PR Comment for Results diff

* Added 1 more results file for wide comparison

* Update argument and docstring in script

* Update command in yaml file

* update yaml file

* Revert results

* remove unnecessary model revision

* Updated table format and added it as a tests

* update table format

* Skip comment if nothing is changed

* use TaskResult for results fetching

* Remove PR comment generation part and add to only test

* remove changes in yaml file

* Fix CLI command in comment

* fix cli example

* change main score extraction strategy

* Declare and use MTEB_SCORE_EPSILON variable

* modify results for checking tests

* Update tests/test_results_diff.py

Co-authored-by: Kenneth Enevoldsen <kenevoldsen@pm.me>

* Added correct fix

* correct namings

* Delete results/aari1995__German_Semantic_STS_V2/22912542b0ec7a7ef369837e28ffe6352a27afc9/AmazonCounterfactualClassification.json

* Revert "Delete results/aari1995__German_Semantic_STS_V2/22912542b0ec7a7ef369837e28ffe6352a27afc9/AmazonCounterfactualClassification.json"

This reverts commit 9fd40998199e80a1c6ee0fbae53becff76a6da42.

* Moved functions in test file

* correct import

* rollback results file

---------

Co-authored-by: Roman Solomatin <36135455+Samoed@users.noreply.github.com>
Co-authored-by: Kenneth Enevoldsen <kenevoldsen@pm.me>

scripts/create_pr_results_comment.py CHANGED
@@ -8,21 +8,23 @@ Usage:
8
  Description:
9
  - Compares new model results (added in the current PR) against reference models.
10
  - Outputs a Markdown file with results for each new model and highlights the best scores.
 
11
  - By default, compares against: intfloat/multilingual-e5-large and google/gemini-embedding-001.
12
  - You can specify reference models with the --models argument.
13
 
14
  Arguments:
15
  --reference-models: List of reference models to compare against (default: intfloat/multilingual-e5-large google/gemini-embedding-001)
16
- --output: Output markdown file path (default: model-comparison.md)
17
 
18
  Example:
19
- python scripts/create_pr_results_comment.py --models intfloat/multilingual-e5-large myorg/my-new-model
20
  """
21
 
22
  from __future__ import annotations
23
 
24
  import argparse
25
  import json
 
26
  import logging
27
  import subprocess
28
  from collections import defaultdict
@@ -51,7 +53,6 @@ repo_path = Path(__file__).parents[1]
51
 
52
  cache = ResultCache(repo_path)
53
 
54
-
55
  def get_diff_from_main() -> list[str]:
56
  differences = subprocess.run(
57
  ["git", "diff", "--name-only", "origin/main...HEAD"],
@@ -329,7 +330,7 @@ def create_argparse() -> argparse.ArgumentParser:
329
  "--output",
330
  type=Path,
331
  default=Path("model-comparison.md"),
332
- help="Output markdown file path",
333
  )
334
  return parser
335
 
@@ -338,10 +339,9 @@ def main(reference_models: list[str], output_path: Path) -> None:
338
  logger.info("Starting to create PR results comment...")
339
  logger.info(f"Using reference models: {', '.join(reference_models)}")
340
  diff = get_diff_from_main()
341
-
342
  model_tasks = extract_new_models_and_tasks(diff)
343
  markdown = generate_markdown_content(model_tasks, reference_models)
344
-
345
  output_path.parent.mkdir(parents=True, exist_ok=True)
346
  output_path.write_text(markdown)
347
 
@@ -349,4 +349,4 @@ def main(reference_models: list[str], output_path: Path) -> None:
349
  if __name__ == "__main__":
350
  parser = create_argparse()
351
  args = parser.parse_args()
352
- main(args.reference_models, args.output)
 
8
  Description:
9
  - Compares new model results (added in the current PR) against reference models.
10
  - Outputs a Markdown file with results for each new model and highlights the best scores.
11
+ - Also generates a table comparing old (base branch) vs new (PR) scores for updated result files.
12
  - By default, compares against: intfloat/multilingual-e5-large and google/gemini-embedding-001.
13
  - You can specify reference models with the --reference-models argument.
14
 
15
  Arguments:
16
  --reference-models: List of reference models to compare against (default: intfloat/multilingual-e5-large google/gemini-embedding-001)
17
+ --output: Output markdown file for reference model comparison (default: model-comparison.md)
18
 
19
  Example:
20
+ python scripts/create_pr_results_comment.py --reference-models intfloat/multilingual-e5-large myorg/my-new-model --output model-comparison.md
21
  """
22
 
23
  from __future__ import annotations
24
 
25
  import argparse
26
  import json
27
+ import os
28
  import logging
29
  import subprocess
30
  from collections import defaultdict
 
53
 
54
  cache = ResultCache(repo_path)
55
 
 
56
  def get_diff_from_main() -> list[str]:
57
  differences = subprocess.run(
58
  ["git", "diff", "--name-only", "origin/main...HEAD"],
 
330
  "--output",
331
  type=Path,
332
  default=Path("model-comparison.md"),
333
+ help="Output markdown file for reference model comparison (default: model-comparison.md)",
334
  )
335
  return parser
336
 
 
339
  logger.info("Starting to create PR results comment...")
340
  logger.info(f"Using reference models: {', '.join(reference_models)}")
341
  diff = get_diff_from_main()
342
+
343
  model_tasks = extract_new_models_and_tasks(diff)
344
  markdown = generate_markdown_content(model_tasks, reference_models)
 
345
  output_path.parent.mkdir(parents=True, exist_ok=True)
346
  output_path.write_text(markdown)
347
 
 
349
  if __name__ == "__main__":
350
  parser = create_argparse()
351
  args = parser.parse_args()
352
+ main(args.reference_models, args.output)
tests/test_results_diff.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for result difference validation.
3
+
4
+ This test module reuses functions from create_pr_results_comment.py to:
5
+ 1. Validate that main_score changes don't exceed configured thresholds
6
+ 2. Provide a summary of all main_score changes in the PR
7
+ """
8
+
9
+ import json
10
+ import os
11
+ import sys
12
+ import subprocess
13
+ from pathlib import Path
14
+ import pandas as pd
15
+ import pytest
16
+ from mteb import TaskResult
17
+
18
# Maximum allowed absolute change in a task's main_score between the PR
# branch and the base branch before the diff test fails.
MTEB_SCORE_EPSILON = 0.001

# Repository root (this file lives in <repo>/tests/).
repo_path = Path(__file__).parents[1]
20
+
21
def get_base_ref() -> str:
    """Resolve the git ref PR changes are compared against.

    Prefers the ``PR_BASE_SHA`` environment variable (set by CI for pull
    requests); falls back to ``origin/main`` when it is absent.
    """
    base_sha = os.environ.get("PR_BASE_SHA")
    if base_sha is not None:
        return base_sha
    return "origin/main"
24
+
25
def get_diff_from_main(base_ref: str = "origin/main") -> list[str]:
    """Return the paths of files changed between ``base_ref`` and HEAD.

    Args:
        base_ref: Git ref to diff against (default ``origin/main``).
            Callers that load "old" file contents from ``get_base_ref()``
            should pass the same ref here so the diff and the old contents
            come from the same base commit.

    Returns:
        Relative file paths reported by ``git diff --name-only``, one per
        changed file. Empty when there are no changes (or git fails, since
        stderr is captured and the return code is not checked).
    """
    completed = subprocess.run(
        ["git", "diff", "--name-only", f"{base_ref}...HEAD"],
        cwd=repo_path,
        text=True,
        capture_output=True,
    )
    return completed.stdout.splitlines()
34
+
35
def load_json_from_git_ref(relative_path: str, git_ref: str) -> dict | None:
    """Load ``relative_path`` as JSON from the given git reference.

    Returns ``None`` when the file does not exist at that ref, is empty,
    or does not contain valid JSON.
    """
    completed = subprocess.run(
        ["git", "show", f"{git_ref}:{relative_path}"],
        cwd=repo_path,
        text=True,
        capture_output=True,
    )
    # Non-zero return code means the path is absent at that ref.
    if completed.returncode != 0:
        return None
    payload = completed.stdout
    if not payload.strip():
        return None
    try:
        return json.loads(payload)
    except json.JSONDecodeError:
        return None
49
+
50
def extract_main_score(task_result_dict: dict) -> dict[tuple[str, str], float]:
    """Map each (split, subset) pair in a task result to its main_score.

    No aggregation is performed; every split/subset combination keeps its
    own value. Scores greater than 1 are treated as percentages and scaled
    into the 0-1 range. Entries whose subset or score is missing/NaN are
    skipped, and any failure while parsing the result yields ``{}``.

    Returns:
        Dict mapping (split, subset) tuples to their main_score value,
        e.g. ``{("test", "default"): 0.85, ("test", "en"): 0.90}``.
    """
    scores: dict[tuple[str, str], float] = {}

    try:
        parsed = TaskResult.from_dict(task_result_dict).only_main_score()

        for split, entries in parsed.scores.items():
            for entry in entries:
                subset = entry.get("hf_subset")
                raw_score = entry.get("main_score")
                if subset is None or raw_score is None or pd.isna(raw_score):
                    continue
                value = float(raw_score)
                # Normalize percentage-style scores (e.g. 85.0 -> 0.85).
                if value > 1:
                    value /= 100
                scores[(split, subset)] = value

        return scores
    except Exception:
        # Best-effort: a malformed result simply contributes no scores.
        return {}
80
+
81
def create_old_new_diff_table(differences: list[str], base_ref: str) -> pd.DataFrame:
    """Create DataFrame comparing old and new main_score for each split/subset.

    For every changed result JSON in ``differences``, loads the old version
    from ``base_ref`` via git and the new version from the working tree,
    then records one row per (split, subset) whose score changed. Files
    that are deleted, not JSON, model_meta.json itself, missing metadata,
    or unreadable on either side are silently skipped.

    Args:
        differences: Repo-relative paths of files changed in the PR.
        base_ref: Git ref holding the "old" file contents (e.g. origin/main).

    Returns:
        DataFrame with columns model_name/task_name/split/subset/
        old_revision/old_value/new_revision/new_value/delta/pct_change,
        sorted by model, task, split, subset. Empty (but with the same
        columns) when nothing comparable changed.
    """
    columns = ["model_name", "task_name", "split", "subset", "old_revision", "old_value", "new_revision", "new_value", "delta", "pct_change"]
    rows: list[dict] = []

    for relative_path in differences:
        path = repo_path / relative_path
        # Only consider existing result JSONs; model_meta.json carries no scores.
        if not path.exists() or path.suffix != ".json" or path.name == "model_meta.json":
            continue

        # Result files are stored as <model dir>/<task name>.json next to model_meta.json.
        model_meta_path = path.parent / "model_meta.json"
        task_name = path.stem

        if not model_meta_path.exists():
            continue

        try:
            with model_meta_path.open("r") as f:
                model_meta = json.load(f)
            model_name = model_meta["name"]
            new_revision = model_meta["revision"]
        except (json.JSONDecodeError, IOError, KeyError):
            # Unreadable/incomplete metadata -> cannot attribute scores; skip.
            continue

        # Old version of the result file; None means it is new in this PR.
        old_json = load_json_from_git_ref(relative_path, base_ref)
        if old_json is None:
            continue

        old_model_meta = load_json_from_git_ref(str(model_meta_path.relative_to(repo_path)), base_ref)
        if old_model_meta is None:
            old_revision = "unknown"
        else:
            try:
                old_revision = old_model_meta.get("revision", "unknown")
            except (AttributeError, TypeError):
                # Guard against non-dict JSON (e.g. a list) at the old ref.
                old_revision = "unknown"

        try:
            with path.open("r") as f:
                new_json = json.load(f)
        except (json.JSONDecodeError, IOError):
            continue

        old_scores = extract_main_score(old_json)
        new_scores = extract_main_score(new_json)

        # Need scores on both sides to compare anything.
        if not old_scores or not new_scores:
            continue

        for (split, subset), new_value in new_scores.items():
            # Only compare (split, subset) pairs present in both versions.
            if (split, subset) not in old_scores:
                continue

            old_value = old_scores[(split, subset)]

            delta = new_value - old_value
            # Unchanged scores produce no row.
            if delta == 0:
                continue

            # pct_change is None when the old score is 0 (undefined ratio).
            pct_change = None if old_value == 0 else delta / old_value

            rows.append({
                "model_name": model_name,
                "task_name": task_name,
                "split": split,
                "subset": subset,
                "old_revision": old_revision,
                "old_value": old_value,
                "new_revision": new_revision,
                "new_value": new_value,
                "delta": delta,
                "pct_change": pct_change,
            })

    if not rows:
        # Keep the schema stable even when there is nothing to report.
        return pd.DataFrame(columns=columns)

    return pd.DataFrame(rows, columns=columns).sort_values(
        ["model_name", "task_name", "split", "subset"]
    )
161
+
162
def test_result_diffs_within_threshold():
    """Fail if any main_score delta exceeds MTEB_SCORE_EPSILON.

    Compares every changed result file in the PR against its counterpart
    at the base ref and collects all (split, subset) score changes whose
    absolute delta exceeds the threshold. Skips (rather than fails) when
    the branch has no changes or no comparable result files.
    """
    base_ref = get_base_ref()
    differences = get_diff_from_main()

    # Skip test if no changes found
    if not differences:
        pytest.skip("No changes found between base and current branch")

    diff_table = create_old_new_diff_table(differences, base_ref)

    # Skip test if no comparable results found
    if diff_table.empty:
        pytest.skip("No comparable updated result files found")

    violations = []
    for _, row in diff_table.iterrows():
        delta = abs(row["delta"])

        model_task = f"{row['model_name']}/{row['task_name']}"

        if delta > MTEB_SCORE_EPSILON:
            violations.append(
                f" {model_task}: The difference between the current score ({row['new_value']}) and the previous ({row['old_value']}) exceeds threshold of {MTEB_SCORE_EPSILON}"
            )

    assert not violations, (
        f"Main score changes exceed configured threshold "
        f"(MTEB_SCORE_EPSILON={MTEB_SCORE_EPSILON}):\n"
        + "\n".join(violations)
    )