Erfun committed on
Commit ·
0e17e03
1
Parent(s): 284f0e4
first commit
Browse files- CONTRIBUTING +3 -0
- README.md +25 -0
- load_external.py +253 -0
- makefile +12 -0
- pyproject.toml +7 -0
- reduce_large_json_files.py +43 -0
- scripts/convert_bright.py +83 -0
- scripts/merge_same_model.py +46 -0
- scripts/normalize_results_folder.py +128 -0
- scripts/remove_ablations.py +17 -0
- tests/__init__.py +0 -0
- tests/mock_cache_dir/readme.md +1 -0
- tests/mock_cache_dir/results/results +1 -0
- tests/test_correct_folder_structure.py +287 -0
- tests/test_ensure_correct_metadata.py +73 -0
- tests/test_load_datasets.py +12 -0
- tests/test_load_results.py +27 -0
CONTRIBUTING
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
TODO: add tests and commit hooks
|
| 2 |
+
|
| 3 |
+
Users must be sure no files are over 10MB. If any are, we should remove all spaces from them to keep them < 10MB. This should be checked by a pre-commit hook.
|
README.md
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
benchmark: mteb
|
| 3 |
+
type: evaluation
|
| 4 |
+
submission_name: MTEB
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
> [!NOTE]
|
| 8 |
+
> Previously it was possible to submit models results to MTEB by adding the results to the model metadata. This is no longer an option as we want to ensure high quality metadata.
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
This repository contains the results of the embedding benchmark evaluated using the package `mteb`.
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
| Reference | |
|
| 15 |
+
| ------------------- | ---------------------------------------------------------------------------------------- |
|
| 16 |
+
| 🦾 **[Leaderboard]** | An up to date leaderboard of embedding models |
|
| 17 |
+
| 📚 **[mteb]** | Guides and instructions on how to use `mteb`, including running, submitting scores, etc. |
|
| 18 |
+
| 🙋 **[Questions]** | Questions about the results |
|
| 19 |
+
| 🙋 **[Issues]** | Issues or bugs you have found |
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
[Leaderboard]: https://huggingface.co/spaces/mteb/leaderboard
|
| 23 |
+
[mteb]: https://github.com/embeddings-benchmark/mteb
|
| 24 |
+
[Questions]: https://github.com/embeddings-benchmark/mteb/discussions
|
| 25 |
+
[Issues]: https://github.com/embeddings-benchmark/mteb/issues
|
load_external.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
import math
|
| 6 |
+
import re
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Any
|
| 9 |
+
|
| 10 |
+
from huggingface_hub import HfApi, get_hf_file_metadata, hf_hub_download, hf_hub_url
|
| 11 |
+
from huggingface_hub.errors import NotASafetensorsRepoError
|
| 12 |
+
from huggingface_hub.hf_api import ModelInfo
|
| 13 |
+
from huggingface_hub.repocard import metadata_load
|
| 14 |
+
from mteb import ModelMeta, get_task
|
| 15 |
+
|
| 16 |
+
API = HfApi()
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Maps Hugging Face library identifiers to the display names used in results.
library_mapping = {
    "sentence-transformers": "Sentence Transformers",
}


def get_model_dir(model_id: str) -> Path:
    """Return the folder holding externally submitted results for *model_id*.

    Slashes in the model id are replaced with ``__`` so the id can serve as a
    single directory name, e.g. ``org/model`` -> ``results/org__model/external``.
    """
    safe_name = model_id.replace("/", "__")
    return Path("results") / safe_name / "external"
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Tasks renamed in mteb: maps the old README spelling to the current task name.
renamed_tasks = {
    "NorwegianParliament": "NorwegianParliamentClassification",
    "CMedQAv2": "CMedQAv2-reranking",
    "CMedQAv1": "CMedQAv1-reranking",
    "8TagsClustering": "EightTagsClustering",
    "PPC": "PpcPC",
    "PawsX": "PawsXParaphraseIdentification",
}


def simplify_dataset_name(name: str) -> str:
    """Normalize a README dataset name to the current mteb task name.

    Drops the ``"MTEB "`` prefix, keeps only the first whitespace-separated
    token (stripping trailing language annotations such as ``(en)``), and
    applies the rename table above.
    """
    first_token = name.replace("MTEB ", "").split()[0]
    return renamed_tasks.get(first_token, first_token)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def get_model_parameters_memory(model_info: ModelInfo) -> tuple[int | None, float | None]:
    """Estimate ``(parameter_count, memory_gb)`` for a hub model.

    Tries, in order:
      1. SafeTensors metadata (exact parameter count),
      2. the byte size of ``pytorch_model.bin``,
      3. ``total_size`` recorded in ``pytorch_model.bin.index.json``.

    Memory is estimated assuming fp32 weights (4 bytes per parameter).
    Returns ``(None, None)`` when no source is available.
    """
    try:
        safetensors = API.get_safetensors_metadata(model_info.id)
        num_parameters = sum(safetensors.parameter_count.values())
        return num_parameters, round(num_parameters * 4 / 1024**3, 2)
    except NotASafetensorsRepoError:  # fix: exception variable was unused
        logger.info(f"Could not find SafeTensors metadata for {model_info.id}")

    filenames = [sib.rfilename for sib in model_info.siblings]
    if "pytorch_model.bin" in filenames:
        url = hf_hub_url(model_info.id, filename="pytorch_model.bin")
        meta = get_hf_file_metadata(url)
        bytes_per_param = 4  # assumes fp32 weights — TODO confirm per model
        num_params = round(meta.size / bytes_per_param)
        size_gb = round(meta.size * (4 / bytes_per_param) / 1024**3, 2)
        return num_params, size_gb
    if "pytorch_model.bin.index.json" in filenames:
        index_path = hf_hub_download(model_info.id, filename="pytorch_model.bin.index.json")
        # Fix: close the file handle instead of leaking it via json.load(open(...)).
        with open(index_path) as f:
            size = json.load(f)
        bytes_per_param = 4
        if "metadata" in size and "total_size" in size["metadata"]:
            total_size = size["metadata"]["total_size"]  # bytes
            return round(total_size / bytes_per_param), round(total_size / 1024**3, 2)
    logger.info(f"Could not find the model parameters for {model_info.id}")
    return None, None
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def get_dim_seq_size(model: ModelInfo) -> tuple[str | None, str | None, int, float]:
    """Return ``(embed_dim, max_seq_len, n_parameters, memory_gb)`` for *model*.

    Reads sentence-transformers pooling/dense configs and the main
    ``config.json`` from the hub to infer dimensions; parameter count and
    memory come from :func:`get_model_parameters_memory`.
    """
    siblings = model.siblings or []
    filenames = [sib.rfilename for sib in siblings]
    dim, seq = None, None
    # The pooling config carries the base embedding width.
    for filename in filenames:
        if re.match(r"\d+_Pooling/config.json", filename):
            st_config_path = hf_hub_download(model.id, filename=filename)
            # Fix: close the file handle instead of leaking it via json.load(open(...)).
            with open(st_config_path) as f:
                dim = json.load(f).get("word_embedding_dimension", None)
            break
    # A dense projection layer, if present, overrides the output dimension.
    for filename in filenames:
        if re.match(r"\d+_Dense/config.json", filename):
            st_config_path = hf_hub_download(model.id, filename=filename)
            with open(st_config_path) as f:
                dim = json.load(f).get("out_features", dim)
    if "config.json" in filenames:
        config_path = hf_hub_download(model.id, filename="config.json")
        with open(config_path) as f:
            config = json.load(f)
        if not dim:
            dim = config.get("hidden_dim", config.get("hidden_size", config.get("d_model", None)))
        seq = config.get("n_positions", config.get("max_position_embeddings", config.get("n_ctx", config.get("seq_length", None))))

    parameters, memory = get_model_parameters_memory(model)
    return dim, seq, parameters, memory
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def create_model_meta(model_info: ModelInfo) -> ModelMeta | None:
    """Build a ``ModelMeta`` record for a hub model from its README metadata and configs."""
    readme_path = hf_hub_download(model_info.id, filename="README.md", etag_timeout=30)
    meta = metadata_load(readme_path)
    dim, seq, parameters, memory = None, None, None, None
    try:
        dim, seq, parameters, memory = get_dim_seq_size(model_info)
    except Exception as e:
        # Best effort: the metadata record is still useful without size info.
        logger.error(f"Error getting model parameters for {model_info.id}, {e}")

    release_date = str(model_info.created_at.date()) if model_info.created_at else ""
    library = [library_mapping[model_info.library_name]] if model_info.library_name in library_mapping else []
    # README "language" may be a single string or a list; normalize to a list.
    languages = meta.get("language", [])
    if not isinstance(languages, list) and isinstance(languages, str):
        languages = [languages]
    # yaml transforms norwegian `no` to False
    for i in range(len(languages)):
        if languages[i] is False:
            languages[i] = "no"

    model_meta = ModelMeta(
        name=model_info.id,
        revision=model_info.sha,
        release_date=release_date,
        open_weights=True,
        framework=library,
        license=meta.get("license", None),
        embed_dim=dim,
        max_tokens=seq,
        n_parameters=parameters,
        languages=languages,
    )
    return model_meta
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def parse_readme(model_info: ModelInfo) -> dict[str, dict[str, Any]] | None:
    """Parse MTEB scores out of a model README's ``model-index`` metadata.

    Returns a mapping from task name to an MTEBResults-shaped dict, or
    ``None`` when the README has no usable model-index (or the model looks
    like a re-upload of another model).
    """
    model_id = model_info.id
    try:
        readme_path = hf_hub_download(model_info.id, filename="README.md", etag_timeout=30)
    except Exception:
        # Hub downloads are flaky; retry once before giving up.
        logger.warning(f"ERROR: Could not fetch metadata for {model_id}, trying again")
        readme_path = hf_hub_download(model_id, filename="README.md", etag_timeout=30)
    meta = metadata_load(readme_path)
    if "model-index" not in meta:
        logger.info(f"Could not find model-index in {model_id}")
        return
    model_index = meta["model-index"][0]
    model_name_from_readme = model_index.get("name", None)
    # Organizations whose README model names legitimately differ from the repo id.
    orgs = ["Alibaba-NLP", "HIT-TMG", "McGill-NLP", "Snowflake", "facebook", "jinaai", "nomic-ai"]
    is_org = any([model_id.startswith(org) for org in orgs])
    # There are a lot of re-uploads with tunes, quantization, etc. We only want the original model.
    # Most of the time we can detect re-uploads by checking whether the model name from
    # the README matches the model id, but some orgs use a different name in their README.
    if model_name_from_readme and not model_info.id.endswith(model_name_from_readme) and not is_org:
        logger.warning(f"Model name mismatch: {model_info.id} vs {model_name_from_readme}")
        return
    results = model_index.get("results", [])
    model_results = {}
    for result in results:
        dataset = result["dataset"]
        dataset_type = simplify_dataset_name(dataset["name"])

        # One output dict per task, accumulating scores across splits/configs.
        if dataset_type not in model_results:
            output_dict = {
                "dataset_revision": dataset.get("revision", ""),
                "task_name": simplify_dataset_name(dataset["name"]),
                "evaluation_time": None,
                "mteb_version": None,
                "scores": {},
            }
        else:
            output_dict = model_results[dataset_type]

        try:
            mteb_task = get_task(output_dict["task_name"])
        except Exception:
            logger.warning(f"Error getting task for {model_id} {output_dict['task_name']}")
            continue

        mteb_task_metadata = mteb_task.metadata
        mteb_task_eval_languages = mteb_task_metadata.eval_langs

        scores_dict = output_dict["scores"]
        current_split = dataset["split"]
        current_config = dataset.get("config", "")
        cur_split_metrics = {
            "hf_subset": current_config,
            # eval_langs is either a flat list or a per-subset mapping.
            "languages": mteb_task_eval_languages if isinstance(mteb_task_eval_languages, list) else mteb_task_eval_languages.get(current_config, ["None"]),
        }
        for metric in result["metrics"]:
            # README scores are percentages; mteb stores fractions in [0, 1].
            if isinstance(metric["value"], (float, int)):
                cur_split_metrics[metric["type"]] = metric["value"] / 100
            else:
                cur_split_metrics[metric["type"]] = metric["value"]

        main_score_str = "main_score"
        if main_score_str not in cur_split_metrics:
            # old sts and sum_eval have cos_sim_pearson, but in model_meta cosine_spearman is main_score
            for old_metric, new_metric in zip(["cos_sim_pearson", "cos_sim_spearman"], ["cosine_pearson", "cosine_spearman"]):
                if old_metric in cur_split_metrics:
                    cur_split_metrics[new_metric] = cur_split_metrics[old_metric]

        if mteb_task.metadata.main_score not in cur_split_metrics:
            logger.warning(f"Could not find main score for {model_id} {output_dict['task_name']}, mteb task {mteb_task.metadata.name}. Main score: {mteb_task.metadata.main_score}. Metrics: {cur_split_metrics}, result {result['metrics']}")
            continue

        cur_split_metrics[main_score_str] = cur_split_metrics.get(mteb_task.metadata.main_score, None)
        split_metrics = scores_dict.get(current_split, [])
        split_metrics.append(cur_split_metrics)
        scores_dict[current_split] = split_metrics
        model_results[dataset_type] = output_dict
    return model_results
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def get_mteb_data() -> None:
    """Fetch every hub model tagged ``mteb`` and write its external results.

    For each model this writes ``model_meta.json`` plus one JSON file per
    task into ``results/<org>__<name>/external/``. Models that are GGUF
    conversions or belong to known spam accounts are skipped.
    """
    models = sorted(API.list_models(filter="mteb", full=True), key=lambda x: x.id)
    n_models = len(models)  # hoisted: invariant across iterations
    for i, model_info in enumerate(models, start=1):
        logger.info(f"[{i}/{n_models}] Processing {model_info.id}")
        model_path = get_model_dir(model_info.id)
        if (model_path / "model_meta.json").exists() and len(list(model_path.glob("*.json"))) > 1:
            # NOTE: intentionally not skipped — existing results are refreshed.
            logger.info(f"Model meta already exists for {model_info.id}")
        if model_info.id.lower().endswith("gguf"):
            logger.info(f"Skipping {model_info.id} GGUF model")
            continue

        # Skip accounts that mass-upload models with copied metadata.
        spam_users = ["ILKT", "fine-tuned", "mlx-community"]
        is_spam = False
        for spam_user in spam_users:
            if model_info.id.startswith(spam_user):
                logger.info(f"Skipping {model_info.id}")
                is_spam = True
                break  # fix: was `continue`, which pointlessly kept scanning
        if is_spam:
            continue
        model_meta = create_model_meta(model_info)
        model_results = parse_readme(model_info)

        if not model_meta or not model_results:
            logger.warning(f"Could not get model meta or results for {model_info.id}")
            continue

        # exist_ok=True makes the prior exists() check unnecessary.
        model_path.mkdir(parents=True, exist_ok=True)

        model_meta_path = model_path / "model_meta.json"
        with model_meta_path.open("w") as f:
            json.dump(model_meta.model_dump(), f, indent=4)

        for task_result in model_results.values():
            result_file = model_path / f"{task_result['task_name']}.json"
            with result_file.open("w") as f:
                json.dump(task_result, f, indent=4)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    get_mteb_data()
|
makefile
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
install-for-tests:
|
| 2 |
+
@echo "--- Installing dependencies for tests ---"
|
| 3 |
+
pip install pip --upgrade
|
| 4 |
+
pip install .
|
| 5 |
+
|
| 6 |
+
test:
|
| 7 |
+
@echo "--- Running tests ---"
|
| 8 |
+
pytest
|
| 9 |
+
|
| 10 |
+
pre-push:
|
| 11 |
+
@echo "--- Running pre-push commands ---"
|
| 12 |
+
python reduce_large_json_files.py
|
pyproject.toml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "results"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "The result repository for mteb"
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.9"
|
| 7 |
+
dependencies = ["mteb[dev]>=1.13.0"]
|
reduce_large_json_files.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import glob
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
import mteb
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def resize_flores():
    """Keep only the relevant splits in every ``FloresBitextMining.json`` file.

    Each file is rewritten in place via mteb's validate-and-filter. Files
    mteb cannot load are left untouched.
    """
    paths = Path(__file__).parent.glob("**/FloresBitextMining.json")

    for p in paths:
        try:
            res = mteb.MTEBResults.from_disk(p)
            res.validate_and_filter_scores()
            res.to_disk(p)
        except Exception as e:
            # Fix: was a bare `pass`, which hid every failure. Keep the
            # best-effort behavior but say which files were skipped and why.
            print(f"Skipping {p}: {e}")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def remove_spaces():
    """Re-serialize oversized result files as compact JSON.

    Result files must stay below the hub's ~10MB limit (see CONTRIBUTING);
    any file at or above 9.5 MB is rewritten without indentation or
    separator spaces to shrink it.
    """
    size_limit = 9.5 * 1024 * 1024  # bytes; leaves headroom under the 10MB cap
    for file in glob.glob("results/*/*/*.json"):
        if os.path.getsize(file) >= size_limit:
            print(f"Resizing {file} to have no indentations")
            # Read it in as JSON and write it out fully compacted.
            with open(file, "r") as f:
                data = json.load(f)

            # Fix: `indent=None` alone keeps ", " / ": " separator spaces;
            # compact separators remove every space, as CONTRIBUTING requires.
            with open(file, "w") as f:
                json.dump(data, f, indent=None, separators=(",", ":"))


if __name__ == "__main__":
    resize_flores()
    remove_spaces()
|
scripts/convert_bright.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Convert raw BRIGHT benchmark score folders into MTEBResults-style JSON files under results/."""
import json
import os

# Map BRIGHT's metric-name prefixes (the part before "@") onto mteb's
# lowercase metric names, e.g. "NDCG@10" -> "ndcg_at_10".
REPLACE_MAP = {
    "NDCG": "ndcg",
    "MAP": "map",
    "MRR": "mrr",
    "RECALL": "recall",
    "Recall": "recall",
    "P": "precision",
}

# Map the short model aliases used in bright_scores folder names to the
# model folder names used under results/.
MODEL_TO_MODEL = {
    "bm25": "bm25",
    "bge": "bge-large-en-v1.5",
    "cohere": "Cohere-embed-english-v3.0",
    "e5": "e5-mistral-7b-instruct",
    "google": "google-gecko.text-embedding-preview-0409",
    "grit": "GritLM-7B",
    "inst-l": "instructor-large",
    "inst-xl": "instructor-xl",
    "openai": "text-embedding-3-large",
    "qwen2": "gte-Qwen2-7B-instruct",
    "qwen": "gte-Qwen1.5-7B-instruct",
    "sbert": "all-mpnet-base-v2",
    "sf": "SFR-Embedding-Mistral",
    "voyage": "voyage-large-2-instruct",
}
# The model alias is the third-from-last underscore-separated token of each
# score-folder name.
folders = os.listdir("bright_scores/main") + os.listdir("bright_scores/long_context")
models = set(
    [
        x.split("_")[-3]
        for x in folders
        if (os.path.isdir("bright_scores/main/" + x) or os.path.isdir("bright_scores/long_context/" + x))
    ]
)
print(models)
for model in models:
    print(f"Converting {model}")
    # Skeleton of an MTEBResults file: "standard" collects main BRIGHT runs,
    # "long" collects the long-context runs.
    result_template = {
        "dataset_revision": "a75a0eb483f6a5233a6efc2d63d71540a4443dfb",
        "evaluation_time": 0,
        "kg_co2_emissions": None,
        "mteb_version": "1.12.79",
        "scores": {"standard": [], "long": []},
        "task_name": "BrightRetrieval",
    }
    for folder in [
        x
        for x in folders
        if (os.path.isdir("bright_scores/main/" + x) or os.path.isdir("bright_scores/long_context/" + x))
        and (x.split("_")[-3] == model)
    ]:
        if os.path.isdir("bright_scores/main/" + folder):
            results_path = os.path.join("bright_scores/main", folder, "results.json")
            split = "standard"
        else:
            results_path = os.path.join("bright_scores/long_context", folder, "results.json")
            assert "long_True" in folder, folder
            split = "long"

        with open(results_path) as f:
            results = json.load(f)

        # The hf_subset is the leading one or two underscore-joined tokens.
        # NOTE(review): `subset` stays unbound for any other token count —
        # presumably folder names always have 4 or 5 tokens; confirm before reuse.
        if len(folder.split("_")) == 4:
            subset = folder.split("_")[0]
        elif len(folder.split("_")) == 5:
            subset = folder.split("_")[0] + "_" + folder.split("_")[1]

        result_template["scores"][split].append(
            {
                "hf_subset": subset,
                "languages": ["eng-Latn"],
                "main_score": results["NDCG@10"],
                # Rename each metric key to mteb's convention via REPLACE_MAP.
                **{"_at_".join([REPLACE_MAP.get(x, x) for x in k.split("@")]): v for k, v in results.items()},
            }
        )

    model_folder = MODEL_TO_MODEL[model]
    os.makedirs(f"results/{model_folder}/no_revision_available", exist_ok=True)
    print(f"Writing to: results/{model_folder}/no_revision_available/BrightRetrieval.json")
    with open(f"results/{model_folder}/no_revision_available/BrightRetrieval.json", "w") as f:
        json.dump(result_template, f, indent=4)
|
scripts/merge_same_model.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import glob
|
| 3 |
+
import tqdm
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def line_count(file):
    """Return the number of lines in *file*."""
    with open(file) as fh:
        total = 0
        for _ in fh:
            total += 1
        return total
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
for folder in glob.glob("results/*"):
    if not os.path.isdir(folder):
        continue

    print(f"Processing {folder}")

    # Mirror the "-1" duplicate's subfolders into the original model folder.
    for subfolder in glob.glob(os.path.join(folder + "-1", "*")):
        if not os.path.isdir(subfolder):
            continue

        # if it doesn't exist in the original, create it
        if not os.path.isdir(subfolder.replace("-1", "")):
            os.makedirs(subfolder.replace("-1", ""), exist_ok=True)

    # If a duplicate folder with the "-1" suffix exists, merge it in.
    if os.path.isdir(folder + "-1"):
        # Copy each file over; on a name clash keep the longer file.
        # NOTE(review): .replace("-1", "") strips EVERY "-1", including ones
        # inside file names — presumably no model/file name contains "-1";
        # confirm before rerunning.
        for file in tqdm.tqdm(glob.glob(os.path.join(folder + "-1", "*", "*.json"))):
            target = file.replace("-1", "")
            if os.path.exists(target):
                if line_count(file) > line_count(target):
                    # Fix: os.replace instead of os.system("mv ..."): no shell,
                    # so paths with spaces or metacharacters are handled safely.
                    os.replace(file, target)
            else:
                os.replace(file, target)


"""
This script merges two model directories that are the same except one ends with "-1"

I renamed all files to end with "-1" because we can't have duplicate filenames. Then we need some script to merge their contents (which is this). So first rename all model folders then run this.

Then go through and remove the -1 from ones that are new and thus weren't merged
"""
|
scripts/normalize_results_folder.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Normalize folder structure for the results folder such that results are on the same format as the mteb run command."""
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
import mteb
|
| 8 |
+
from packaging.version import Version
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def resolve_conflict_meta(current_path: Path, expected_path: Path) -> None:
    """Resolve a conflict between two ``model_meta.json`` files.

    When both files hold identical JSON the duplicate at *current_path* is
    deleted; otherwise the conflict is left for a human to resolve.
    """
    with current_path.open("r") as src:
        meta_current = json.load(src)

    with expected_path.open("r") as dst:
        meta_expected = json.load(dst)

    if meta_current != meta_expected:
        logger.info("Meta file is different, please resolve manually.")
        return

    logger.info("Meta file is the same, removing")
    current_path.unlink()
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def resolve_conflict_result(current_path: Path, expected_path: Path) -> None:
    """Resolve conflict between two result files.

    The two results are compared ignoring run-dependent fields; identical
    results drop the duplicate, otherwise the newer mteb version wins and
    equal versions are left for manual resolution.
    """
    c_res = mteb.MTEBResults.from_disk(current_path)
    e_res = mteb.MTEBResults.from_disk(expected_path)
    c_dict_repr = c_res.model_dump()
    e_dict_repr = e_res.model_dump()

    # Timing and emissions legitimately differ between runs; ignore them.
    for d in [c_dict_repr, e_dict_repr]:
        d.pop("kg_co2_emissions")
        d.pop("evaluation_time")

    if c_dict_repr == e_dict_repr:
        logger.info("Result file is the same, removing")
        current_path.unlink()
    else:
        # check version and keep the newest
        c_version = c_res.mteb_version
        e_version = e_res.mteb_version

        if Version(c_version) > Version(e_version):
            logger.info("Newer version of result file, moving")
            expected_path.parent.mkdir(parents=True, exist_ok=True)
            current_path.rename(expected_path)
        elif Version(c_version) == Version(e_version):
            # Same version but different scores — a human has to pick.
            logger.info("Same version of result file, removing, but scores are different. Please resolve manually.")
            logger.info(f"Old scores: {c_res.scores}")
            logger.info(f"New scores: {e_res.scores}")
        else:
            logger.info("Older version of result file, removing")
            # make sure the folder exists
            current_path.unlink()
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def resolve_conflict(current_path: Path, expected_path: Path) -> None:
    """Resolve a conflict between two files, dispatching on file kind."""
    is_meta_file = current_path.name == "model_meta.json"
    if is_meta_file:
        resolve_conflict_meta(current_path, expected_path)
        return
    resolve_conflict_result(current_path, expected_path)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def remove_folders_with_only_meta_files(results_folder: Path) -> None:
    """Remove model/revision folders whose only content is ``model_meta.json``.

    A folder containing just the meta file holds no actual results and is
    deleted along with the meta file.
    """
    for folder in results_folder.glob("*/*"):
        json_files = list(folder.glob("*.json"))  # fix: glob once, not twice
        if len(json_files) == 1 and json_files[0].name == "model_meta.json":
            logger.info(f"Removing folder {folder}")
            json_files[0].unlink()
            folder.rmdir()
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def main(attempt_to_resolve_conflict: bool) -> None:
    """Main function.

    Moves every result folder to ``results/<model_name>/<revision>`` as
    derived from its ``model_meta.json``. Name clashes are either resolved
    via :func:`resolve_conflict` (when *attempt_to_resolve_conflict* is
    True) or collected and raised at the end.
    """
    results_folder = Path(__file__).parent.parent / "results"
    meta_files = results_folder.glob("**/model_meta.json")
    meta_files = list(meta_files)

    conflict_encountered = False

    for meta_file in tqdm(meta_files):
        with open(meta_file, "r") as f:
            meta = json.load(f)

        mdl_name, revision = meta["name"], meta["revision"]

        # Folder names must not contain spaces or slashes.
        mdl_name = mdl_name.replace(" ", "_").replace("/", "__")

        if revision is None:
            # No recorded revision: keep whatever folder the results live in.
            revision = meta_file.parent.name

        expected_path = results_folder / mdl_name / revision
        if expected_path != meta_file.parent:
            logger.info(f"Moving {meta_file.parent}/*.json to {expected_path}")

            files_in_folder = meta_file.parent.glob("*.json")

            for file in files_in_folder:
                if (expected_path / file.name).exists():
                    conflict_encountered = True
                    logger.info(f"File {file} already exists in {expected_path}")
                    if attempt_to_resolve_conflict is True:
                        resolve_conflict(file, expected_path / file.name)
                else:
                    # make sure the folder exists
                    expected_path.mkdir(parents=True, exist_ok=True)
                    file.rename(expected_path / file.name)

    if conflict_encountered and not attempt_to_resolve_conflict:
        raise Exception("Conflicts encountered.")

    remove_folders_with_only_meta_files(results_folder)


if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
    )

    main(attempt_to_resolve_conflict=True)
|
scripts/remove_ablations.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import glob
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import tqdm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Strip the per-instruction "individual" ablation scores from every
# Instruction-task result file, rewriting each file compactly.
for file in tqdm.tqdm(glob.glob("results/*/*.json")):
    print(file)
    if "Instruction" not in file:
        continue
    with open(file, "r") as fin:
        data = json.load(fin)
    # Fix: guard the "test" lookup — files without a "test" split previously
    # raised KeyError and aborted the whole run.
    if "individual" in data.get("test", {}):
        del data["test"]["individual"]

    with open(file, "w") as fout:
        json.dump(data, fout, indent=2)
|
tests/__init__.py
ADDED
|
File without changes
|
tests/mock_cache_dir/readme.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
this is just a mock cache directory, which implements a folder with a link to the results folder in root.
|
tests/mock_cache_dir/results/results
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
../../../results
|
tests/test_correct_folder_structure.py
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
results_folder = Path(__file__).parent.parent / "results"
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def get_metafiles():
    """Collect every ``model_meta.json`` file under the results folder.

    Returns:
        A sorted list of paths.  Sorting makes the pytest parametrization
        order deterministic across machines — ``Path.glob`` yields entries
        in filesystem-dependent order, which breaks test-ID stability
        (e.g. under pytest-xdist).
    """
    return sorted(results_folder.glob("**/model_meta.json"))
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@pytest.mark.parametrize("meta_file", get_metafiles())
def test_correct_folder_structure(meta_file):
    """Every result file must live under ``results/<model_name>/<revision>/``.

    The expected location is derived from the contents of the
    ``model_meta.json`` found in each revision folder.
    """
    with meta_file.open("r") as f:
        meta = json.load(f)

    # Folder names encode the model name with "/" -> "__" and spaces -> "_".
    model_name = meta["name"].replace(" ", "_").replace("/", "__")
    revision = meta["revision"]

    # No recorded revision: fall back to the actual folder name.  Results
    # imported from external sources always live in an "external" folder.
    if revision is None:
        revision = meta_file.parent.name
    if meta_file.parent.parts[-1] == "external":
        revision = "external"

    expected_path = results_folder / model_name / revision
    assert expected_path == meta_file.parent
    assert expected_path.exists()

    json_files = list(expected_path.glob("*.json"))
    assert len(json_files) > 0
    for result_file in json_files:
        assert result_file.exists()
        assert result_file.is_file()
        assert result_file.parent == expected_path
        assert result_file.suffix == ".json"
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
folders_without_meta = [ # please do not add to this list it is only intended for backwards compatibility. Future results should have a model_meta.json file
|
| 47 |
+
("Alibaba-NLP__gte-Qwen1.5-7B-instruct", "no_revision_available"),
|
| 48 |
+
("Alibaba-NLP__gte-Qwen2-7B-instruct", "no_revision_available"),
|
| 49 |
+
("BAAI__bge-base-en", "no_revision_available"),
|
| 50 |
+
("BAAI__bge-base-en-v1.5-instruct", "a5beb1e3e68b9ab74eb54cfd186867f64f240e1a"),
|
| 51 |
+
("BAAI__bge-base-zh", "no_revision_available"),
|
| 52 |
+
("BAAI__bge-base-zh-v1.5", "no_revision_available"),
|
| 53 |
+
("BAAI__bge-large-en", "no_revision_available"),
|
| 54 |
+
("BAAI__bge-large-en-v1.5", "no_revision_available"),
|
| 55 |
+
("BAAI__bge-large-en-v1.5-instruct", "d4aa6901d3a41ba39fb536a557fa166f842b0e09"),
|
| 56 |
+
("BAAI__bge-large-zh", "no_revision_available"),
|
| 57 |
+
("BAAI__bge-large-zh-noinstruct", "no_revision_available"),
|
| 58 |
+
("BAAI__bge-large-zh-v1.5", "no_revision_available"),
|
| 59 |
+
("BAAI__bge-m3", "no_revision_available"),
|
| 60 |
+
("BAAI__bge-m3-instruct", "5617a9f61b028005a4858fdac845db406aefb181"),
|
| 61 |
+
("BAAI__bge-small-en-v1.5-instruct", "5c38ec7c405ec4b44b94cc5a9bb96e735b38267a"),
|
| 62 |
+
("BAAI__bge-small-zh", "no_revision_available"),
|
| 63 |
+
("BAAI__bge-small-zh-v1.5", "no_revision_available"),
|
| 64 |
+
("Cohere__Cohere-embed-english-v3.0", "no_revision_available"),
|
| 65 |
+
("Cohere__Cohere-embed-english-v3.0-instruct", "no_revision_available"),
|
| 66 |
+
("Cohere__Cohere-embed-multilingual-light-v3.0", "no_revision_available"),
|
| 67 |
+
("Cohere__Cohere-embed-multilingual-v3.0", "no_revision_available"),
|
| 68 |
+
("FacebookAI__xlm-roberta-base", "no_revision_available"),
|
| 69 |
+
("FacebookAI__xlm-roberta-large", "no_revision_available"),
|
| 70 |
+
("Geotrend__bert-base-10lang-cased", "no_revision_available"),
|
| 71 |
+
("Geotrend__bert-base-15lang-cased", "no_revision_available"),
|
| 72 |
+
("Geotrend__bert-base-25lang-cased", "no_revision_available"),
|
| 73 |
+
("Geotrend__distilbert-base-25lang-cased", "no_revision_available"),
|
| 74 |
+
("Geotrend__distilbert-base-en-fr-cased", "no_revision_available"),
|
| 75 |
+
("Geotrend__distilbert-base-en-fr-es-pt-it-cased", "no_revision_available"),
|
| 76 |
+
("Geotrend__distilbert-base-fr-cased", "no_revision_available"),
|
| 77 |
+
("KBLab__electra-small-swedish-cased-discriminator", "no_revision_available"),
|
| 78 |
+
("KBLab__sentence-bert-swedish-cased", "no_revision_available"),
|
| 79 |
+
("KB__bert-base-swedish-cased", "no_revision_available"),
|
| 80 |
+
("McGill-NLP__LLM2Vec-Llama-2-7b-chat-hf-mntp-supervised", "no_revision_available"),
|
| 81 |
+
("McGill-NLP__LLM2Vec-Llama-2-unsupervised", "no_revision_available"),
|
| 82 |
+
("McGill-NLP__LLM2Vec-Meta-Llama-3-supervised", "no_revision_available"),
|
| 83 |
+
("McGill-NLP__LLM2Vec-Meta-Llama-3-unsupervised", "no_revision_available"),
|
| 84 |
+
("McGill-NLP__LLM2Vec-Mistral-supervised", "no_revision_available"),
|
| 85 |
+
("McGill-NLP__LLM2Vec-Mistral-unsupervised", "no_revision_available"),
|
| 86 |
+
("McGill-NLP__LLM2Vec-Sheared-Llama-supervised", "no_revision_available"),
|
| 87 |
+
("McGill-NLP__LLM2Vec-Sheared-Llama-unsupervised", "no_revision_available"),
|
| 88 |
+
(
|
| 89 |
+
"Muennighoff__SGPT-1.3B-weightedmean-msmarco-specb-bitfit",
|
| 90 |
+
"no_revision_available",
|
| 91 |
+
),
|
| 92 |
+
(
|
| 93 |
+
"Muennighoff__SGPT-125M-weightedmean-msmarco-specb-bitfit",
|
| 94 |
+
"no_revision_available",
|
| 95 |
+
),
|
| 96 |
+
(
|
| 97 |
+
"Muennighoff__SGPT-125M-weightedmean-msmarco-specb-bitfit-doc",
|
| 98 |
+
"no_revision_available",
|
| 99 |
+
),
|
| 100 |
+
(
|
| 101 |
+
"Muennighoff__SGPT-125M-weightedmean-msmarco-specb-bitfit-que",
|
| 102 |
+
"no_revision_available",
|
| 103 |
+
),
|
| 104 |
+
("Muennighoff__SGPT-125M-weightedmean-nli-bitfit", "no_revision_available"),
|
| 105 |
+
(
|
| 106 |
+
"Muennighoff__SGPT-2.7B-weightedmean-msmarco-specb-bitfit",
|
| 107 |
+
"no_revision_available",
|
| 108 |
+
),
|
| 109 |
+
(
|
| 110 |
+
"Muennighoff__SGPT-5.8B-weightedmean-msmarco-specb-bitfit",
|
| 111 |
+
"no_revision_available",
|
| 112 |
+
),
|
| 113 |
+
(
|
| 114 |
+
"Muennighoff__SGPT-5.8B-weightedmean-msmarco-specb-bitfit-que",
|
| 115 |
+
"no_revision_available",
|
| 116 |
+
),
|
| 117 |
+
("Muennighoff__SGPT-5.8B-weightedmean-nli-bitfit", "no_revision_available"),
|
| 118 |
+
("NbAiLab__nb-bert-base", "no_revision_available"),
|
| 119 |
+
("NbAiLab__nb-bert-large", "no_revision_available"),
|
| 120 |
+
("Salesforce__SFR-Embedding-Mistral", "no_revision_available"),
|
| 121 |
+
(
|
| 122 |
+
"T-Systems-onsite__cross-en-de-roberta-sentence-transformer",
|
| 123 |
+
"no_revision_available",
|
| 124 |
+
),
|
| 125 |
+
("Wissam42__sentence-croissant-llm-base", "no_revision_available"),
|
| 126 |
+
("aliyun__OpenSearch-text-hybrid", "no_revision_available"),
|
| 127 |
+
("almanach__camembert-base", "no_revision_available"),
|
| 128 |
+
("almanach__camembert-large", "no_revision_available"),
|
| 129 |
+
("amazon__titan-embed-text-v1", "no_revision_available"),
|
| 130 |
+
("baichuan-ai__text-embedding", "no_revision_available"),
|
| 131 |
+
("bigscience-data__sgpt-bloom-1b7-nli", "no_revision_available"),
|
| 132 |
+
("bigscience-data__sgpt-bloom-7b1-msmarco", "no_revision_available"),
|
| 133 |
+
("bm25", "no_revision_available"),
|
| 134 |
+
("bm25s", "0_1_10"),
|
| 135 |
+
("castorini__monobert-large-msmarco", "no_revision_available"),
|
| 136 |
+
("castorini__monot5-3b-msmarco-10k", "no_revision_available"),
|
| 137 |
+
("castorini__monot5-base-msmarco-10k", "no_revision_available"),
|
| 138 |
+
("chcaa__dfm-encoder-large-v1", "no_revision_available"),
|
| 139 |
+
("dangvantuan__sentence-camembert-base", "no_revision_available"),
|
| 140 |
+
("dangvantuan__sentence-camembert-large", "no_revision_available"),
|
| 141 |
+
("deepfile__embedder-100p", "no_revision_available"),
|
| 142 |
+
("deepset__gbert-base", "no_revision_available"),
|
| 143 |
+
("deepset__gbert-large", "no_revision_available"),
|
| 144 |
+
("deepset__gelectra-base", "no_revision_available"),
|
| 145 |
+
("deepset__gelectra-large", "no_revision_available"),
|
| 146 |
+
("distilbert__distilbert-base-uncased", "no_revision_available"),
|
| 147 |
+
("dwzhu__e5-base-4k", "no_revision_available"),
|
| 148 |
+
("elastic__elser-v2", "no_revision_available"),
|
| 149 |
+
("facebook__contriever", "2bd46a25019aeea091fd42d1f0fd4801675cf699"),
|
| 150 |
+
("facebook__contriever-instruct", "2bd46a25019aeea091fd42d1f0fd4801675cf699"),
|
| 151 |
+
("facebook__dpr-ctx_encoder-multiset-base", "no_revision_available"),
|
| 152 |
+
("facebook__dragon-plus-context-encoder", "no_revision_available"),
|
| 153 |
+
("facebook__tart-full-flan-t5-xl", "no_revision_available"),
|
| 154 |
+
("facebookresearch__LASER2", "no_revision_available"),
|
| 155 |
+
("facebookresearch__dragon-plus", "no_revision_available"),
|
| 156 |
+
("facebookresearch__dragon-plus-instruct", "no_revision_available"),
|
| 157 |
+
("intfloat__e5-base", "no_revision_available"),
|
| 158 |
+
("intfloat__e5-base-v2", "no_revision_available"),
|
| 159 |
+
("intfloat__e5-large", "no_revision_available"),
|
| 160 |
+
("intfloat__e5-large-v2", "no_revision_available"),
|
| 161 |
+
("intfloat__e5-mistral-7b-instruct", "no_revision_available"),
|
| 162 |
+
(
|
| 163 |
+
"intfloat__e5-mistral-7b-instruct-noinstruct",
|
| 164 |
+
"07163b72af1488142a360786df853f237b1a3ca1",
|
| 165 |
+
),
|
| 166 |
+
("intfloat__e5-small", "no_revision_available"),
|
| 167 |
+
("intfloat__multilingual-e5-base", "no_revision_available"),
|
| 168 |
+
("intfloat__multilingual-e5-large", "no_revision_available"),
|
| 169 |
+
("intfloat__multilingual-e5-small", "no_revision_available"),
|
| 170 |
+
("ipipan__herbert-base-retrieval-v2", "no_revision_available"),
|
| 171 |
+
("ipipan__silver-retriever-base-v1", "no_revision_available"),
|
| 172 |
+
("izhx__udever-bloom-1b1", "no_revision_available"),
|
| 173 |
+
("izhx__udever-bloom-560m", "no_revision_available"),
|
| 174 |
+
("jhu-clsp__FollowIR-7B", "no_revision_available"),
|
| 175 |
+
("jinaai__jina-embeddings-v2-base-en", "no_revision_available"),
|
| 176 |
+
("jonfd__electra-small-nordic", "no_revision_available"),
|
| 177 |
+
("ltg__norbert3-base", "no_revision_available"),
|
| 178 |
+
("ltg__norbert3-large", "no_revision_available"),
|
| 179 |
+
("meta-llama__llama-2-7b-chat", "no_revision_available"),
|
| 180 |
+
("mistral__mistral-embed", "no_revision_available"),
|
| 181 |
+
("mistralai__mistral-7b-instruct-v0.2", "no_revision_available"),
|
| 182 |
+
("moka-ai__m3e-base", "no_revision_available"),
|
| 183 |
+
("moka-ai__m3e-large", "no_revision_available"),
|
| 184 |
+
("nomic-ai__nomic-embed-text-v1", "no_revision_available"),
|
| 185 |
+
("nomic-ai__nomic-embed-text-v1.5-128", "no_revision_available"),
|
| 186 |
+
("nomic-ai__nomic-embed-text-v1.5-256", "no_revision_available"),
|
| 187 |
+
("nomic-ai__nomic-embed-text-v1.5-512", "no_revision_available"),
|
| 188 |
+
("nomic-ai__nomic-embed-text-v1.5-64", "no_revision_available"),
|
| 189 |
+
("nthakur__contriever-base-msmarco", "no_revision_available"),
|
| 190 |
+
("openai__text-embedding-3-large-256", "no_revision_available"),
|
| 191 |
+
("openai__text-embedding-3-large-instruct", "no_revision_available"),
|
| 192 |
+
("openai__text-embedding-3-small-instruct", "no_revision_available"),
|
| 193 |
+
("openai__text-embedding-ada-002", "no_revision_available"),
|
| 194 |
+
("openai__text-embedding-ada-002-instruct", "no_revision_available"),
|
| 195 |
+
("openai__text-search-ada-001", "no_revision_available"),
|
| 196 |
+
("openai__text-search-ada-doc-001", "no_revision_available"),
|
| 197 |
+
("openai__text-search-babbage-001", "no_revision_available"),
|
| 198 |
+
("openai__text-search-curie-001", "no_revision_available"),
|
| 199 |
+
("openai__text-search-davinci-001", "no_revision_available"),
|
| 200 |
+
("openai__text-similarity-ada-001", "no_revision_available"),
|
| 201 |
+
("openai__text-similarity-babbage-001", "no_revision_available"),
|
| 202 |
+
("openai__text-similarity-curie-001", "no_revision_available"),
|
| 203 |
+
("openai__text-similarity-davinci-001", "no_revision_available"),
|
| 204 |
+
("orionweller__tart-dual-contriever-msmarco", "no_revision_available"),
|
| 205 |
+
("princeton-nlp__sup-simcse-bert-base-uncased", "no_revision_available"),
|
| 206 |
+
("princeton-nlp__unsup-simcse-bert-base-uncased", "no_revision_available"),
|
| 207 |
+
("sdadas__st-polish-paraphrase-from-distilroberta", "no_revision_available"),
|
| 208 |
+
("sdadas__st-polish-paraphrase-from-mpnet", "no_revision_available"),
|
| 209 |
+
("sentence-transformers__LaBSE", "no_revision_available"),
|
| 210 |
+
("sentence-transformers__all-MiniLM-L6-v2", "no_revision_available"),
|
| 211 |
+
(
|
| 212 |
+
"sentence-transformers__all-MiniLM-L6-v2-instruct",
|
| 213 |
+
"8b3219a92973c328a8e22fadcfa821b5dc75636a",
|
| 214 |
+
),
|
| 215 |
+
(
|
| 216 |
+
"sentence-transformers__all-mpnet-base-v2-instruct",
|
| 217 |
+
"84f2bcc00d77236f9e89c8a360a00fb1139bf47d",
|
| 218 |
+
),
|
| 219 |
+
("sentence-transformers__allenai-specter", "no_revision_available"),
|
| 220 |
+
(
|
| 221 |
+
"sentence-transformers__average_word_embeddings_glove.6B.300d",
|
| 222 |
+
"no_revision_available",
|
| 223 |
+
),
|
| 224 |
+
(
|
| 225 |
+
"sentence-transformers__average_word_embeddings_komninos",
|
| 226 |
+
"no_revision_available",
|
| 227 |
+
),
|
| 228 |
+
("sentence-transformers__distiluse-base-multilingual-cased-v2", ".gitkeep"),
|
| 229 |
+
(
|
| 230 |
+
"sentence-transformers__distiluse-base-multilingual-cased-v2",
|
| 231 |
+
"no_revision_available",
|
| 232 |
+
),
|
| 233 |
+
("sentence-transformers__gtr-t5-base", "no_revision_available"),
|
| 234 |
+
("sentence-transformers__gtr-t5-large", "no_revision_available"),
|
| 235 |
+
("sentence-transformers__gtr-t5-xl", "no_revision_available"),
|
| 236 |
+
("sentence-transformers__gtr-t5-xxl", "no_revision_available"),
|
| 237 |
+
("sentence-transformers__msmarco-bert-co-condensor", "no_revision_available"),
|
| 238 |
+
("sentence-transformers__multi-qa-MiniLM-L6-cos-v1", "no_revision_available"),
|
| 239 |
+
(
|
| 240 |
+
"sentence-transformers__paraphrase-multilingual-MiniLM-L12-v2",
|
| 241 |
+
"no_revision_available",
|
| 242 |
+
),
|
| 243 |
+
(
|
| 244 |
+
"sentence-transformers__paraphrase-multilingual-mpnet-base-v2",
|
| 245 |
+
"no_revision_available",
|
| 246 |
+
),
|
| 247 |
+
("sentence-transformers__sentence-t5-base", "no_revision_available"),
|
| 248 |
+
("sentence-transformers__sentence-t5-large", "no_revision_available"),
|
| 249 |
+
("sentence-transformers__sentence-t5-xl", "no_revision_available"),
|
| 250 |
+
("sentence-transformers__sentence-t5-xxl", "no_revision_available"),
|
| 251 |
+
("sentence-transformers__use-cmlm-multilingual", "no_revision_available"),
|
| 252 |
+
("shibing624__text2vec-base-chinese", "no_revision_available"),
|
| 253 |
+
("shibing624__text2vec-base-multilingual", "no_revision_available"),
|
| 254 |
+
("shibing624__text2vec-large-chinese", "no_revision_available"),
|
| 255 |
+
("silk-road__luotuo-bert-medium", "no_revision_available"),
|
| 256 |
+
("uklfr__gottbert-base", "no_revision_available"),
|
| 257 |
+
("vesteinn__DanskBERT", "no_revision_available"),
|
| 258 |
+
("voyageai__voyage-2", "no_revision_available"),
|
| 259 |
+
("voyageai__voyage-code-2", "no_revision_available"),
|
| 260 |
+
("voyageai__voyage-large-2-instruct", "no_revision_available"),
|
| 261 |
+
("voyageai__voyage-law-2", "no_revision_available"),
|
| 262 |
+
("voyageai__voyage-lite-01-instruct", "no_revision_available"),
|
| 263 |
+
("voyageai__voyage-lite-02-instruct", "no_revision_available"),
|
| 264 |
+
("voyageai__voyage-multilingual-2", "no_revision_available"),
|
| 265 |
+
("vprelovac__universal-sentence-encoder-multilingual-3", "no_revision_available"),
|
| 266 |
+
(
|
| 267 |
+
"vprelovac__universal-sentence-encoder-multilingual-large-3",
|
| 268 |
+
"no_revision_available",
|
| 269 |
+
),
|
| 270 |
+
]
|
| 271 |
+
|
| 272 |
+
# All (model_folder, revision_folder) pairs present in the results folder,
# ignoring macOS filesystem artifacts (.DS_Store).
model_rev_pairs = [
    (model_folder, rev_folder)
    for model_folder in results_folder.glob("*")
    for rev_folder in model_folder.glob("*")
    if model_folder.name not in [".DS_Store"] and rev_folder.name not in [".DS_Store"]
]

# Sorted listing of (model, revision) folders that lack a model_meta.json —
# useful when reviewing/updating the `folders_without_meta` allow-list above.
pairs_no_meta = [
    (pair[0].name, pair[1].name)
    for pair in model_rev_pairs
    if not (pair[1] / "model_meta.json").exists()
]
pairs_no_meta.sort()

# NOTE: removed leftover debug code that looked up a "Haon-Chen" folder and
# called .exists() on it — it ran unconditionally at import time, raised an
# IndexError whenever no such folder existed, and its result was unused.
|
tests/test_ensure_correct_metadata.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
from tests.test_correct_folder_structure import folders_without_meta, results_folder
|
| 6 |
+
|
| 7 |
+
# All (model_folder, revision_folder) pairs that must carry metadata:
# macOS .DS_Store artifacts are skipped, as are the historic folders
# explicitly exempted in `folders_without_meta`.
model_rev_pairs = [
    (model_folder, rev_folder)
    for model_folder in results_folder.glob("*")
    for rev_folder in model_folder.glob("*")
    if model_folder.name not in [".DS_Store"]
    and rev_folder.name not in [".DS_Store"]
    and ((model_folder.name, rev_folder.name) not in folders_without_meta)
]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@pytest.mark.parametrize("model_rev_pair", model_rev_pairs)
def test_model_meta_in_folders(model_rev_pair):
    """Models added after the 26th of November must ship a model_meta.json file."""
    _, rev_folder = model_rev_pair

    meta_file = rev_folder / "model_meta.json"
    assert meta_file.exists()
    assert meta_file.is_file()
    assert meta_file.parent == rev_folder
    assert meta_file.suffix == ".json"
    assert meta_file.stem == "model_meta"
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# please do not add to this list, this is only for historic results, all new results should include a revision ID
revision_exceptions = [
    ("castorini__mdpr-tied-pft-msmarco", "no_revision_available"),
    ("voyageai__voyage-3", "no_revision_available"),
    ("sentence-transformers__all-mpnet-base-v2", "no_revision_available"),
    ("Snowflake__snowflake-arctic-embed-m-v1.5", "no_revision_available"),
    ("sentence-transformers__all-MiniLM-L12-v2", "no_revision_available"),
    ("nthakur__mcontriever-base-msmarco", "no_revision_available"),
    ("voyageai__voyage-3-lite", "no_revision_available"),
]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@pytest.mark.parametrize("model_rev_pair", model_rev_pairs)
def test_revision_is_specified_for_new_additions(model_rev_pair):
    """
    Models added after 26th of November should include a revision ID and can not use the "no_revision_available" fallback.
    """
    model_folder, rev_folder = model_rev_pair

    # Historic folders on the exception list are exempt.
    if (model_folder.name, rev_folder.name) in revision_exceptions:
        return

    meta_file = rev_folder / "model_meta.json"
    with meta_file.open("r") as f:
        meta = json.load(f)

    invalid_revisions = [
        "no_revision_available",
        "",
        "na",
        "no-revision-available",
    ]
    assert meta["revision"].lower() not in invalid_revisions
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@pytest.mark.parametrize("model_rev_pair", model_rev_pairs)
def test_organization_is_specified_for_new_additions(model_rev_pair):
    """Models added after 26th of November should include a organization ID within their name, e.g. "myorg/my_embedding_model".

    This is to avoid mispecified names such as "myorg__my_embedding_model" and similar.
    """
    _, rev_folder = model_rev_pair

    with (rev_folder / "model_meta.json").open("r") as f:
        meta = json.load(f)

    assert "/" in meta["name"]
|
tests/test_load_datasets.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
import pytest
|
| 3 |
+
from datasets import load_dataset
|
| 4 |
+
from results import MODELS
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@pytest.mark.parametrize("model", MODELS)
|
| 8 |
+
@pytest.mark.xfail(reason="If new model added this test will fail")
|
| 9 |
+
def test_load_results_from_datasets(model):
|
| 10 |
+
"""Ensures that all models can be imported from dataset"""
|
| 11 |
+
path = Path(__file__).parent.parent / "results.py"
|
| 12 |
+
ds = load_dataset(str(path.absolute()), model, trust_remote_code=True)
|
tests/test_load_results.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
|
| 4 |
+
import mteb
|
| 5 |
+
from mteb.load_results.task_results import TaskResult
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def test_load_results():
    """Ensures that files can be loaded using MTEB"""
    # Point mteb's cache at the mock directory, whose results/results entry is
    # a symlink back to the repository-root results folder (see
    # tests/mock_cache_dir/readme.md).
    tests_path = Path(__file__).parent / "mock_cache_dir"

    os.environ["MTEB_CACHE"] = str(tests_path)

    # download_latest=False forces mteb to read only from the local cache —
    # no network access is needed for this test.
    results = mteb.load_results(download_latest=False)

    assert isinstance(results, mteb.BenchmarkResults)
    # Every loaded entry must parse into a TaskResult.
    for model_results in results.model_results:
        for model_result in model_results:
            assert isinstance(model_result, TaskResult)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_load_results_mteb():
    # Smoke test: loading results filtered to a single known model must
    # return that model with its pinned revision.
    test_model_name = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
    model_results = mteb.load_results(models=[test_model_name])
    model_result = model_results[0]
    assert model_result.model_name == test_model_name
    # Pinned revision of the model above; update only if the reference
    # results are regenerated against a newer revision.
    assert model_result.model_revision == "bf3bf13ab40c3157080a7ab344c831b9ad18b5eb"
|