Erfun committed on
Commit
8eda412
·
1 Parent(s): 0e17e03
Files changed (2) hide show
  1. paths.json +0 -0
  2. results.py +515 -0
paths.json ADDED
The diff for this file is too large to render. See raw diff
 
results.py ADDED
@@ -0,0 +1,515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""MTEB Results: Hugging Face `datasets` loading script for MTEB result files."""

from __future__ import annotations

import json
import os
from pathlib import Path

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """@article{muennighoff2022mteb,
doi = {10.48550/ARXIV.2210.07316},
url = {https://arxiv.org/abs/2210.07316},
author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Lo{\"\i}c and Reimers, Nils},
title = {MTEB: Massive Text Embedding Benchmark},
publisher = {arXiv},
journal={arXiv preprint arXiv:2210.07316},
year = {2022}
}
"""

_DESCRIPTION = """Results on MTEB"""

# Index file mapping each model name to its list of result-file paths (see get_paths()).
URL = "https://huggingface.co/datasets/mehran-sarmadi/results/resolve/main/paths.json"
VERSION = datasets.Version("1.0.1")
# Known evaluation language / language-pair codes used to detect multilingual
# result dicts in the old MTEB format.
EVAL_LANGS = [
    "af",
    "afr-eng",
    "am",
    "amh",
    "amh-eng",
    "ang-eng",
    "ar",
    "ar-ar",
    "ara-eng",
    "arq-eng",
    "arz-eng",
    "ast-eng",
    "awa-eng",
    "az",
    "aze-eng",
    "bel-eng",
    "ben-eng",
    "ber-eng",
    "bn",
    "bos-eng",
    "bre-eng",
    "bul-eng",
    "cat-eng",
    "cbk-eng",
    "ceb-eng",
    "ces-eng",
    "cha-eng",
    "cmn-eng",
    "cor-eng",
    "csb-eng",
    "cy",
    "cym-eng",
    "da",
    "dan-eng",
    "de",
    "de-fr",
    "de-pl",
    "deu-eng",
    "dsb-eng",
    "dtp-eng",
    "el",
    "ell-eng",
    "en",
    "en-ar",
    "en-de",
    "en-en",
    "en-tr",
    "eng",
    "epo-eng",
    "es",
    "es-en",
    "es-es",
    "es-it",
    "est-eng",
    "eus-eng",
    "fa",
    # BUG FIX: a missing trailing comma here previously fused this entry with
    # the next one into the single bogus code "fas-Arabfao-eng" via implicit
    # string-literal concatenation, dropping both languages from the list.
    "fas-Arab",
    "fao-eng",
    "fi",
    "fin-eng",
    "fr",
    "fr-en",
    "fr-pl",
    "fra",
    "fra-eng",
    "fry-eng",
    "gla-eng",
    "gle-eng",
    "glg-eng",
    "gsw-eng",
    "hau",
    "he",
    "heb-eng",
    "hi",
    "hin-eng",
    "hrv-eng",
    "hsb-eng",
    "hu",
    "hun-eng",
    "hy",
    "hye-eng",
    "ibo",
    "id",
    "ido-eng",
    "ile-eng",
    "ina-eng",
    "ind-eng",
    "is",
    "isl-eng",
    "it",
    "it-en",
    "ita-eng",
    "ja",
    "jav-eng",
    "jpn-eng",
    "jv",
    "ka",
    "kab-eng",
    "kat-eng",
    "kaz-eng",
    "khm-eng",
    "km",
    "kn",
    "ko",
    "ko-ko",
    "kor-eng",
    "kur-eng",
    "kzj-eng",
    "lat-eng",
    "lfn-eng",
    "lit-eng",
    "lin",
    "lug",
    "lv",
    "lvs-eng",
    "mal-eng",
    "mar-eng",
    "max-eng",
    "mhr-eng",
    "mkd-eng",
    "ml",
    "mn",
    "mon-eng",
    "ms",
    "my",
    "nb",
    "nds-eng",
    "nl",
    # NOTE(review): "nl-ende-en" looks like two fused codes ("nl-en", "de-en"),
    # but it is kept verbatim — confirm against the upstream MTEB list before changing.
    "nl-ende-en",
    "nld-eng",
    "nno-eng",
    "nob-eng",
    "nov-eng",
    "oci-eng",
    "orm",
    "orv-eng",
    "pam-eng",
    "pcm",
    "pes-eng",
    "pl",
    "pl-en",
    "pms-eng",
    "pol-eng",
    "por-eng",
    "pt",
    "ro",
    "ron-eng",
    "ru",
    "run",
    "rus-eng",
    "sl",
    "slk-eng",
    "slv-eng",
    "spa-eng",
    "sna",
    "som",
    "sq",
    "sqi-eng",
    "srp-eng",
    "sv",
    "sw",
    "swa",
    "swe-eng",
    "swg-eng",
    "swh-eng",
    "ta",
    "tam-eng",
    "tat-eng",
    "te",
    "tel-eng",
    "tgl-eng",
    "th",
    "tha-eng",
    "tir",
    "tl",
    "tr",
    "tuk-eng",
    "tur-eng",
    "tzl-eng",
    "uig-eng",
    "ukr-eng",
    "ur",
    "urd-eng",
    "uzb-eng",
    "vi",
    "vie-eng",
    "war-eng",
    "wuu-eng",
    "xho",
    "xho-eng",
    "yid-eng",
    "yor",
    "yue-eng",
    "zh",
    "zh-CN",
    "zh-TW",
    "zh-en",
    "zsm-eng",
]

# Keys that are metadata rather than scores and must not be emitted as rows.
# v_measures key is somehow present in voyage-2-law results and is a list
SKIP_KEYS = ["std", "evaluation_time", "main_score", "threshold", "v_measures", "scores_per_experiment"]

# Use "train" split instead
TRAIN_SPLIT = ["DanishPoliticalCommentsClassification"]
# Use "validation" split instead
VALIDATION_SPLIT = [
    "AFQMC",
    "Cmnli",
    "IFlyTek",
    "LEMBSummScreenFDRetrieval",
    "MSMARCO",
    "MSMARCO-PL",
    "MSMARCO-Fa",
    "MultilingualSentiment",
    "Ocnli",
    "TNews",
]
# Use "dev" split instead
DEV_SPLIT = [
    "CmedqaRetrieval",
    "CovidRetrieval",
    "DuRetrieval",
    "EcomRetrieval",
    "MedicalRetrieval",
    "MMarcoReranking",
    "MMarcoRetrieval",
    "MSMARCO",
    "MSMARCO-PL",
    "MSMARCO-Fa",
    "T2Reranking",
    "T2Retrieval",
    "VideoRetrieval",
    "TERRa",
    "MIRACLReranking",
    "MIRACLRetrieval",
]
# Use "test.full" split
TESTFULL_SPLIT = ["OpusparcusPC"]
# Use "standard" split
STANDARD_SPLIT = ["BrightRetrieval"]
# Use "devtest" split
DEVTEST_SPLIT = ["FloresBitextMining"]

# Datasets whose per-context-length test splits are averaged into one "test_avg" split.
TEST_AVG_SPLIT = {
    "LEMBNeedleRetrieval": [
        "test_256",
        "test_512",
        "test_1024",
        "test_2048",
        "test_4096",
        "test_8192",
        "test_16384",
        "test_32768",
    ],
    "LEMBPasskeyRetrieval": [
        "test_256",
        "test_512",
        "test_1024",
        "test_2048",
        "test_4096",
        "test_8192",
        "test_16384",
        "test_32768",
    ],
}

# One builder config per model directory under ./results.
# Path.name replaces str(path).split('/')[-1]: equivalent on POSIX and also
# correct on Windows, where str(Path) uses backslashes.
MODELS = sorted({path.name for path in (Path(__file__).parent / "results").glob("*") if path.is_dir()})
299
+
300
# Needs to be run whenever new files are added
def get_paths():
    """Collect every per-revision result JSON per model, write the mapping to
    ``paths.json``, and return it as ``{model_name: [file_path, ...]}``."""
    import collections
    import json
    import os

    paths_by_model = collections.defaultdict(list)
    for model_name in MODELS:
        model_root = os.path.join("results", model_name)
        if not os.path.isdir(model_root):
            print(f"Skipping {model_root}")
            continue
        for revision in os.listdir(model_root):
            revision_dir = os.path.join(model_root, revision)
            # Only real revision directories count; "external" results are excluded.
            if revision == "external" or not os.path.isdir(revision_dir):
                continue
            for file_name in os.listdir(revision_dir):
                is_json = file_name.endswith(".json")
                is_meta = file_name.endswith(("overall_results.json", "model_meta.json"))
                if is_json and not is_meta:
                    paths_by_model[model_name].append(os.path.join(revision_dir, file_name))
    with open("paths.json", "w") as f:
        json.dump(paths_by_model, f, indent=2)
    return paths_by_model
324
+
325
+
326
class MTEBResults(datasets.GeneratorBasedBuilder):
    """MTEBResults: one builder config per model; yields flat score rows
    (dataset, language, metric, score, split, subset) from raw result JSON."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=model,
            description=f"{model} MTEB results",
            version=VERSION,
        )
        for model in MODELS
    ]

    def _info(self):
        """Declare the flat row schema shared by old- and new-format results."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "mteb_dataset_name": datasets.Value("string"),
                    "eval_language": datasets.Value("string"),
                    "metric": datasets.Value("string"),
                    "score": datasets.Value("float"),
                    "split": datasets.Value("string"),
                    "hf_subset": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download paths.json, then the result files for the selected model config."""
        path_file = dl_manager.download_and_extract(URL)
        # Local debugging help
        # with open("/path/to/local/paths.json") as f:
        with open(path_file) as f:
            files = json.load(f)
        downloaded_files = dl_manager.download_and_extract(files[self.config.name])
        return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files})]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form.

        Handles both the old MTEB result layout ({split: {lang?: {metric: score}}})
        and the new layout ({"scores": {split: [{metric: score, hf_subset, languages}]}}).
        """
        logger.info(f"Generating examples from {filepath}")
        out = []

        for path in filepath:
            with open(path, encoding="utf-8") as f:
                res_dict = json.load(f)
            # Naming changed from mteb_dataset_name to task_name
            ds_name = res_dict.get("mteb_dataset_name", res_dict.get("task_name"))
            # New MTEB format uses scores
            res_dict = res_dict.get("scores", res_dict)

            # Pick the split(s) to read for this dataset; default is "test".
            split = "test"
            if (ds_name in TRAIN_SPLIT) and ("train" in res_dict):
                split = "train"
            elif (ds_name in VALIDATION_SPLIT) and ("validation" in res_dict):
                split = "validation"
            elif (ds_name in DEV_SPLIT) and ("dev" in res_dict):
                split = "dev"
            elif (ds_name in TESTFULL_SPLIT) and ("test.full" in res_dict):
                split = "test.full"
            elif ds_name in STANDARD_SPLIT:
                # BrightRetrieval may carry both "standard" and "long" splits.
                split = []
                if "standard" in res_dict:
                    split += ["standard"]
                if "long" in res_dict:
                    split += ["long"]
            elif (ds_name in DEVTEST_SPLIT) and ("devtest" in res_dict):
                split = "devtest"
            elif ds_name in TEST_AVG_SPLIT:
                # Average the per-context-length test splits into one "test_avg" split.
                # BUG FIX: the original reassigned res_dict = {} and then read the
                # per-split scores from the now-empty res_dict, so the averaging
                # loop never saw any data. Keep the loaded dict as the source and
                # accumulate into a fresh dict instead.
                source_dict = res_dict
                res_dict = {}
                for split in TEST_AVG_SPLIT[ds_name]:
                    # Old MTEB format: {"test_256": {metric: score, ...}, ...}
                    if isinstance(source_dict.get(split), dict):
                        for k, v in source_dict[split].items():
                            if k in ["hf_subset", "languages"]:
                                # Metadata, not a score — copy once and skip averaging
                                # (the original fell through and divided a string).
                                res_dict[k] = v
                                continue
                            if not isinstance(v, float):
                                continue
                            v /= len(TEST_AVG_SPLIT[ds_name])
                            if k not in res_dict:
                                res_dict[k] = v
                            else:
                                res_dict[k] += v
                    # New MTEB format: {"test_256": [{metric: score, ...}], ...}
                    elif isinstance(source_dict.get(split), list):
                        assert len(source_dict[split]) == 1, "Only single-lists supported for now"
                        for k, v in source_dict[split][0].items():
                            if k in ["hf_subset", "languages"]:
                                res_dict[k] = v
                            if not isinstance(v, float):
                                continue
                            v /= len(TEST_AVG_SPLIT[ds_name])
                            if k not in res_dict:
                                res_dict[k] = v
                            else:
                                res_dict[k] += v
                split = "test_avg"
                res_dict = {split: [res_dict]}
            elif "test" not in res_dict:
                print(f"Skipping {ds_name} as split {split} not present.")
                continue

            splits = [split] if not isinstance(split, list) else split
            full_res_dict = res_dict
            for split in splits:
                res_dict = full_res_dict.get(split)

                ### New MTEB format ###
                if isinstance(res_dict, list):
                    for res in res_dict:
                        lang = res.pop("languages", [""])
                        subset = res.pop("hf_subset", "")
                        if len(lang) == 1:
                            # Monolingual English is recorded with an empty language.
                            lang = lang[0].replace("eng-Latn", "")
                        else:
                            lang = "_".join(lang)
                        if not lang:
                            lang = subset
                        for metric, score in res.items():
                            if metric in SKIP_KEYS:
                                continue
                            if isinstance(score, dict):
                                # Legacy format with e.g. {cosine: {spearman: ...}}
                                # Now it is {cosine_spearman: ...}
                                for k, v in score.items():
                                    if not isinstance(v, float):
                                        print(f"WARNING: Expected float, got {v} for {ds_name} {lang} {metric} {k}")
                                        continue
                                    # BUG FIX: the original re-checked `metric`, which
                                    # the outer loop had already skipped (dead check);
                                    # the sub-metric name is what needs filtering here.
                                    if k in SKIP_KEYS:
                                        continue
                                    out.append(
                                        {
                                            "mteb_dataset_name": ds_name,
                                            "eval_language": lang,
                                            "metric": metric + "_" + k,
                                            "score": v * 100,
                                            # BUG FIX: "split" was missing from this row,
                                            # unlike every other append and the declared
                                            # features in _info().
                                            "split": split,
                                            "hf_subset": subset,
                                        }
                                    )
                            else:
                                if not isinstance(score, float):
                                    print(f"WARNING: Expected float, got {score} for {ds_name} {lang} {metric}")
                                    continue
                                out.append(
                                    {
                                        "mteb_dataset_name": ds_name,
                                        "eval_language": lang,
                                        "metric": metric,
                                        "score": score * 100,
                                        "split": split,
                                        "hf_subset": subset,
                                    }
                                )

                ### Old MTEB format ###
                else:
                    is_multilingual = any(x in res_dict for x in EVAL_LANGS)
                    langs = res_dict.keys() if is_multilingual else ["en"]
                    for lang in langs:
                        if lang in SKIP_KEYS:
                            continue
                        test_result_lang = res_dict.get(lang) if is_multilingual else res_dict
                        subset = test_result_lang.pop("hf_subset", "")
                        if subset == "" and is_multilingual:
                            subset = lang
                        for metric, score in test_result_lang.items():
                            # Normalize flat scores into the nested shape for one code path.
                            if not isinstance(score, dict):
                                score = {metric: score}
                            for sub_metric, sub_score in score.items():
                                if any(x in sub_metric for x in SKIP_KEYS):
                                    continue
                                if isinstance(sub_score, dict):
                                    continue
                                out.append(
                                    {
                                        "mteb_dataset_name": ds_name,
                                        "eval_language": lang if is_multilingual else "",
                                        "metric": f"{metric}_{sub_metric}" if metric != sub_metric else metric,
                                        "score": sub_score * 100,
                                        "split": split,
                                        "hf_subset": subset,
                                    }
                                )
        # Stable ordering by dataset name so example ids are deterministic.
        for idx, row in enumerate(sorted(out, key=lambda x: x["mteb_dataset_name"])):
            yield idx, row
511
+
512
+
513
# NOTE: for generating the new paths
# Run this module directly to regenerate paths.json after adding result files.
if __name__ == "__main__":
    get_paths()