Samoed committed · Commit 0f1702e · unverified · 1 Parent(s): f02b091

add support for v2 (#301)

* add support for v2

* test

.github/workflows/model-results-comparison.yaml CHANGED
@@ -43,7 +43,7 @@ jobs:
 
       - name: Install dependencies
         run: |
-          pip install git+https://github.com/embeddings-benchmark/mteb.git tabulate
+          pip install git+https://github.com/embeddings-benchmark/mteb@v2.0.0 tabulate
 
       - name: Generate model comparison
         env:
results/BAAI__bge-m3/5617a9f61b028005a4858fdac845db406aefb181/AlphaNLI.json CHANGED
@@ -155,4 +155,4 @@
     ]
   },
   "task_name": "AlphaNLI"
-}
\ No newline at end of file
+}
scripts/create_pr_results_comment.py CHANGED
@@ -24,14 +24,14 @@ from __future__ import annotations
 import argparse
 import json
 import logging
-import os
 import subprocess
 from collections import defaultdict
 from pathlib import Path
 
 import mteb
 import pandas as pd
-from mteb.abstasks.AbsTask import AbsTask
+from mteb import AbsTask
+from mteb.cache import ResultCache
 
 ModelName = str
 ModelRevision = str
@@ -49,7 +49,7 @@ logger = logging.getLogger(__name__)
 
 repo_path = Path(__file__).parents[1]
 
-os.environ["MTEB_CACHE"] = str(repo_path.parent)
+cache = ResultCache(repo_path)
 
 
 def get_diff_from_main() -> list[str]:
@@ -110,7 +110,7 @@ def create_comparison_table(
     models = [model] + reference_models
     max_col_name = "Max result"
     task_col_name = "task_name"
-    results = mteb.load_results(models=models, tasks=tasks, download_latest=False)
+    results = cache.load_results(models=models, tasks=tasks)
     df = results.to_dataframe(include_model_revision=True)
     new_df_columns = []
     columns_to_merge = defaultdict(list)
@@ -140,7 +140,7 @@
         raise ValueError(f"No results found for models {models} on tasks {tasks}")
 
     df[max_col_name] = None
-    task_results = mteb.load_results(tasks=tasks, download_latest=False)
+    task_results = cache.load_results(tasks=tasks)
     task_results = task_results.join_revisions()
 
     task_results_df = task_results.to_dataframe(format="long")
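
For context on the API change above: in mteb v2 the result cache is addressed through an explicit ResultCache object rather than the MTEB_CACHE environment variable and the download_latest flag used in v1. A minimal sketch of the pattern, assuming mteb v2.0.0 is installed; the cache path, model name, and task name below are illustrative placeholders, not values taken from this commit:

    from pathlib import Path

    from mteb.cache import ResultCache

    # v2: the cache directory is passed explicitly instead of being read from
    # the MTEB_CACHE environment variable as in v1.
    cache = ResultCache(Path("results"))  # placeholder path to a local results repo

    # Filtered load; replaces v1's mteb.load_results(..., download_latest=False).
    results = cache.load_results(models=["BAAI/bge-m3"], tasks=["AlphaNLI"])

    # Downstream calls are unchanged from v1, as the diff above shows:
    results = results.join_revisions()
    df = results.to_dataframe(format="long")
    print(df.head())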