File size: 6,643 Bytes
141ba02
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
"""
Tests for result difference validation.

This test module reuses functions from create_pr_results_comment.py to:
1. Validate that main_score changes don't exceed configured thresholds
2. Provide a summary of all main_score changes in the PR
"""

import json
import os
import sys
import subprocess
from pathlib import Path
import pandas as pd
import pytest
from mteb import TaskResult

# Maximum tolerated absolute change in any main_score within a PR.
MTEB_SCORE_EPSILON = 0.001

# Repository root: this file lives one directory below it.
repo_path = Path(__file__).parents[1]

def get_base_ref() -> str:
    """Return the git ref to compare against: ``PR_BASE_SHA`` if set, else ``origin/main``."""
    configured = os.environ.get("PR_BASE_SHA")
    return configured if configured is not None else "origin/main"

def get_diff_from_main(base_ref: str | None = None) -> list[str]:
    """Return repo-relative paths of files changed between *base_ref* and HEAD.

    Args:
        base_ref: Git ref to diff against. Defaults to ``get_base_ref()``
            (the ``PR_BASE_SHA`` env var, or ``origin/main``), so the diff
            base matches the ref later used to load the old result files.
            Previously ``origin/main`` was hard-coded here, which could
            disagree with the base used elsewhere when ``PR_BASE_SHA`` is set.

    Returns:
        One path per changed file, as reported by ``git diff --name-only``.
    """
    if base_ref is None:
        base_ref = get_base_ref()
    # Triple-dot syntax diffs against the merge-base, i.e. only changes
    # introduced on this branch (not unrelated commits on the base branch).
    result = subprocess.run(
        ["git", "diff", "--name-only", f"{base_ref}...HEAD"],
        cwd=repo_path,
        text=True,
        capture_output=True,
    )
    return result.stdout.splitlines()

def load_json_from_git_ref(relative_path: str, git_ref: str) -> dict | None:
    """Load *relative_path* as JSON from *git_ref*; ``None`` when absent or invalid.

    Uses ``git show ref:path`` so the file's content at that ref is read
    without checking anything out.
    """
    shown = subprocess.run(
        ["git", "show", f"{git_ref}:{relative_path}"],
        cwd=repo_path,
        text=True,
        capture_output=True,
    )
    # A non-zero exit means the path did not exist at that ref.
    if shown.returncode != 0:
        return None
    payload = shown.stdout
    if not payload.strip():
        return None
    try:
        parsed = json.loads(payload)
    except json.JSONDecodeError:
        return None
    return parsed

def extract_main_score(task_result_dict: dict) -> dict[tuple[str, str], float]:
    """Collect the main_score of every (split, hf_subset) pair in a task result.

    No aggregation is performed — each pair keeps its own value. Scores that
    appear to be on a 0-100 scale (value > 1) are rescaled to 0-1.

    Returns:
        Mapping of ``(split, subset)`` to main_score, for example
        ``{("test", "default"): 0.85, ("test", "en"): 0.90}``. An empty
        dict is returned when the input cannot be parsed.
    """
    scores: dict[tuple[str, str], float] = {}

    try:
        parsed = TaskResult.from_dict(task_result_dict).only_main_score()

        for split, subset_entries in parsed.scores.items():
            for entry in subset_entries:
                subset = entry.get("hf_subset")
                raw_score = entry.get("main_score")

                if subset is None or raw_score is None or pd.isna(raw_score):
                    continue

                score = float(raw_score)
                # Normalise percentage-scale scores (e.g. 85.0 -> 0.85).
                if score > 1:
                    score /= 100
                scores[(split, subset)] = score

        return scores
    except Exception:
        # Best-effort: a malformed result simply contributes no comparisons.
        return {}

def create_old_new_diff_table(differences: list[str], base_ref: str) -> pd.DataFrame:
    """Create DataFrame comparing old and new main_score for each split/subset.

    Args:
        differences: Repo-relative paths of files changed in the PR. Entries
            that are missing on disk, are not ``.json`` files, or are
            ``model_meta.json`` themselves are skipped.
        base_ref: Git ref providing the "old" version of each file.

    Returns:
        A DataFrame with one row per (model, task, split, subset) whose
        main_score changed, sorted by model/task/split/subset. When nothing
        comparable changed, an empty DataFrame with the expected columns.
    """
    columns = ["model_name", "task_name", "split", "subset", "old_revision", "old_value", "new_revision", "new_value", "delta", "pct_change"]
    rows: list[dict] = []

    for relative_path in differences:
        path = repo_path / relative_path
        # Only existing per-task JSON result files are comparable;
        # model_meta.json is handled separately below.
        if not path.exists() or path.suffix != ".json" or path.name == "model_meta.json":
            continue

        # Result files live next to their model's model_meta.json; the task
        # name is the result file's stem.
        model_meta_path = path.parent / "model_meta.json"
        task_name = path.stem
        
        if not model_meta_path.exists():
            continue
            
        # Current (new) model identity comes from the on-disk model_meta.json.
        try:
            with model_meta_path.open("r") as f:
                model_meta = json.load(f)
                model_name = model_meta["name"]
                new_revision = model_meta["revision"]
        except (json.JSONDecodeError, IOError, KeyError):
            continue

        # No old version at base_ref means the file is new — nothing to diff.
        old_json = load_json_from_git_ref(relative_path, base_ref)
        if old_json is None:
            continue

        # Old revision is best-effort: fall back to "unknown" when the old
        # model_meta.json is missing or not a dict (hence the AttributeError/
        # TypeError guard around .get).
        old_model_meta = load_json_from_git_ref(str(model_meta_path.relative_to(repo_path)), base_ref)
        if old_model_meta is None:
            old_revision = "unknown"
        else:
            try:
                old_revision = old_model_meta.get("revision", "unknown")
            except (AttributeError, TypeError):
                old_revision = "unknown"

        try:
            with path.open("r") as f:
                new_json = json.load(f)
        except (json.JSONDecodeError, IOError):
            continue

        old_scores = extract_main_score(old_json)
        new_scores = extract_main_score(new_json)
        
        if not old_scores or not new_scores:
            continue
        
        # Compare only (split, subset) pairs present in BOTH versions.
        for (split, subset), new_value in new_scores.items():
            if (split, subset) not in old_scores:
                continue
            
            old_value = old_scores[(split, subset)]
            
            # Unchanged scores are omitted from the table entirely.
            delta = new_value - old_value
            if delta == 0:
                continue
            
            # Percent change is undefined when the old score is zero.
            pct_change = None if old_value == 0 else delta / old_value

            rows.append({
                "model_name": model_name,
                "task_name": task_name,
                "split": split,
                "subset": subset,
                "old_revision": old_revision,
                "old_value": old_value,
                "new_revision": new_revision,
                "new_value": new_value,
                "delta": delta,
                "pct_change": pct_change,
            })

    if not rows:
        return pd.DataFrame(columns=columns)

    return pd.DataFrame(rows, columns=columns).sort_values(
        ["model_name", "task_name", "split", "subset"]
    )

def test_result_diffs_within_threshold():
    """Fail if any main_score delta between base and HEAD exceeds MTEB_SCORE_EPSILON.

    Skips when the branch has no changed files, or when none of the changes
    are comparable result files. (Removed a leftover debug ``print`` of the
    raw diff list.)
    """
    base_ref = get_base_ref()
    differences = get_diff_from_main()

    # Skip test if no changes found
    if not differences:
        pytest.skip("No changes found between base and current branch")

    diff_table = create_old_new_diff_table(differences, base_ref)

    # Skip test if no comparable results found
    if diff_table.empty:
        pytest.skip("No comparable updated result files found")

    violations = []
    for _, row in diff_table.iterrows():
        # Threshold applies to the absolute change, regardless of direction.
        if abs(row["delta"]) > MTEB_SCORE_EPSILON:
            model_task = f"{row['model_name']}/{row['task_name']}"
            violations.append(
                f"  {model_task}: The difference between the current score ({row['new_value']}) and the previous ({row['old_value']}) exceeds threshold of {MTEB_SCORE_EPSILON}"
            )

    assert not violations, (
        f"Main score changes exceed configured threshold "
        f"(MTEB_SCORE_EPSILON={MTEB_SCORE_EPSILON}):\n"
        + "\n".join(violations)
    )