# Tests for mteb TaskMetadata validation.
import logging
import pytest
from mteb.abstasks.TaskMetadata import TaskMetadata
from mteb.overview import get_tasks
# Historic datasets whose metadata was never fully filled in. They are
# grandfathered out of the completeness check in test_all_metadata_is_filled.
# Do NOT add new datasets to this list: all new tasks must ship with
# complete metadata (i.e. TaskMetadata.is_filled() must return True).
_HISTORIC_DATASETS = [
    "AmazonReviewsClassification",
    "MasakhaNEWSClassification",
    "MassiveIntentClassification",
    "MassiveScenarioClassification",
    "MTOPDomainClassification",
    "MTOPIntentClassification",
    "NordicLangClassification",
    "ScalaClassification",
    "NoRecClassification",
    "NorwegianParliamentClassification",
    "PunjabiNewsClassification",
    "CBD",
    "PolEmo2.0-IN",
    "PolEmo2.0-OUT",
    "AllegroReviews",
    "PAC",
    "SweRecClassification",
    "TNews",
    "IFlyTek",
    "MultilingualSentiment",
    "JDReview",
    "OnlineShopping",
    "Waimai",
    "BlurbsClusteringP2P",
    "BlurbsClusteringS2S",
    "TenKGnadClusteringP2P",
    "TenKGnadClusteringS2S",
    "ArxivClusteringP2P",
    "ArxivClusteringS2S",
    "BigPatentClustering",
    "BiorxivClusteringP2P",
    "BiorxivClusteringS2S",
    "MedrxivClusteringP2P",
    "MedrxivClusteringS2S",
    "RedditClustering",
    "RedditClusteringP2P",
    "StackExchangeClustering",
    "StackExchangeClusteringP2P",
    "TwentyNewsgroupsClustering",
    "WikiCitiesClustering",
    "AlloProfClusteringP2P",
    "AlloProfClusteringS2S",
    "HALClusteringS2S",
    "MLSUMClusteringP2P",
    "MLSUMClusteringS2S",
    "MasakhaNEWSClusteringP2P",
    "MasakhaNEWSClusteringS2S",
    "SNLClustering",
    "VGClustering",
    "EightTagsClustering",
    "RomaniBibleClustering",
    "FloresClusteringS2S",
    "SpanishNewsClusteringP2P",
    "SwednClustering",
    "CLSClusteringS2S",
    "CLSClusteringP2P",
    "ThuNewsClusteringS2S",
    "ThuNewsClusteringP2P",
    "TV2Nordretrieval",
    "TwitterHjerneRetrieval",
    "GerDaLIR",
    "GerDaLIRSmall",
    "GermanDPR",
    "GermanQuAD-Retrieval",
    "LegalQuAD",
    "AILACasedocs",
    "AILAStatutes",
    "ArguAna",
    "ClimateFEVER",
    "CQADupstackAndroidRetrieval",
    "CQADupstackEnglishRetrieval",
    "CQADupstackGamingRetrieval",
    "CQADupstackGisRetrieval",
    "CQADupstackMathematicaRetrieval",
    "CQADupstackPhysicsRetrieval",
    "CQADupstackProgrammersRetrieval",
    "CQADupstackStatsRetrieval",
    "CQADupstackTexRetrieval",
    "CQADupstackUnixRetrieval",
    "CQADupstackWebmastersRetrieval",
    "CQADupstackWordpressRetrieval",
    "DBPedia",
    "FEVER",
    "FiQA2018",
    "HagridRetrieval",
    "HotpotQA",
    "LegalBenchConsumerContractsQA",
    "LegalBenchCorporateLobbying",
    "LegalSummarization",
    "LEMBNeedleRetrieval",
    "LEMBPasskeyRetrieval",
    "MSMARCO",
    "MSMARCOv2",
    "NarrativeQARetrieval",
    "NFCorpus",
    "NQ",
    "QuoraRetrieval",
    "SCIDOCS",
    "SciFact",
    "Touche2020",
    "TRECCOVID",
    "AlloprofRetrieval",
    "BSARDRetrieval",
    "SyntecRetrieval",
    "JaQuADRetrieval",
    "Ko-miracl",
    "Ko-StrategyQA",
    "MintakaRetrieval",
    "MIRACLRetrieval",
    "MultiLongDocRetrieval",
    "XMarket",
    "SNLRetrieval",
    "ArguAna-PL",
    "DBPedia-PL",
    "FiQA-PL",
    "HotpotQA-PL",
    "MSMARCO-PL",
    "NFCorpus-PL",
    "NQ-PL",
    "Quora-PL",
    "SCIDOCS-PL",
    "SciFact-PL",
    "TRECCOVID-PL",
    "SpanishPassageRetrievalS2P",
    "SpanishPassageRetrievalS2S",
    "SweFaqRetrieval",
    "T2Retrieval",
    "MMarcoRetrieval",
    "DuRetrieval",
    "CovidRetrieval",
    "CmedqaRetrieval",
    "EcomRetrieval",
    "MedicalRetrieval",
    "VideoRetrieval",
    "LeCaRDv2",
    "SprintDuplicateQuestions",
    "TwitterSemEval2015",
    "TwitterURLCorpus",
    "OpusparcusPC",
    "PawsX",
    "SICK-E-PL",
    "PpcPC",
    "CDSC-E",
    "PSC",
    "Ocnli",
    "Cmnli",
    "AskUbuntuDupQuestions",
    "MindSmallReranking",
    "SciDocsRR",
    "StackOverflowDupQuestions",
    "AlloprofReranking",
    "SyntecReranking",
    "MIRACLReranking",
    "T2Reranking",
    "MMarcoReranking",
    "CMedQAv1-reranking",
    "CMedQAv2-reranking",
    "GermanSTSBenchmark",
    "BIOSSES",
    "SICK-R",
    "STS12",
    "STS13",
    "STS14",
    "STS15",
    "STS16",
    "STSBenchmark",
    "FinParaSTS",
    "SICKFr",
    "KLUE-STS",
    "KorSTS",
    "STS17",
    "STS22",
    "STSBenchmarkMultilingualSTS",
    "SICK-R-PL",
    "CDSC-R",
    "RonSTS",
    "STSES",
    "ATEC",
    "BQ",
    "LCQMC",
    "PAWSX",
    "STSB",
    "AFQMC",
    "QBQTC",
    "SummEval",
    "SummEvalFr",
    "ArxivClusteringP2P.v2",
    "SwednClusteringP2P",
    "SwednClusteringS2S",
    "MalayalamNewsClassification",
    "TamilNewsClassification",
    "ArxivClusteringP2P.v3",
    "TenKGnadClusteringP2P.v2",
    "TenKGnadClusteringS2S.v2",
]
def test_given_dataset_config_then_it_is_valid():
    """A dataset dict carrying both ``path`` and ``revision`` is accepted."""
    metadata_kwargs = dict(
        name="MyTask",
        dataset={"path": "test/dataset", "revision": "1.0"},
        description="testing",
        reference=None,
        type="Classification",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["eng-Latn"],
        main_score="map",
        date=None,
        form=None,
        domains=None,
        license=None,
        task_subtypes=None,
        socioeconomic_status=None,
        annotations_creators=None,
        dialect=None,
        text_creation=None,
        bibtex_citation="",
        avg_character_length=None,
        n_samples=None,
    )

    my_task = TaskMetadata(**metadata_kwargs)

    # The dataset dict is stored verbatim on the metadata object.
    assert my_task.dataset["path"] == "test/dataset"
    assert my_task.dataset["revision"] == "1.0"
def test_given_missing_dataset_path_then_it_throws():
    """Omitting the ``dataset`` argument entirely raises a ValueError."""
    metadata_kwargs = dict(
        name="MyTask",
        description="testing",
        reference=None,
        type="Classification",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["eng-Latn"],
        main_score="map",
        date=None,
        form=None,
        domains=None,
        license=None,
        task_subtypes=None,
        socioeconomic_status=None,
        annotations_creators=None,
        dialect=None,
        text_creation=None,
        bibtex_citation="",
        avg_character_length=None,
        n_samples=None,
    )

    with pytest.raises(ValueError):
        TaskMetadata(**metadata_kwargs)
def test_given_missing_revision_path_then_it_throws():
    """A dataset dict with a ``path`` but no ``revision`` key raises a ValueError."""
    metadata_kwargs = dict(
        name="MyTask",
        dataset={"path": "test/dataset"},  # "revision" key deliberately absent
        description="testing",
        reference=None,
        type="Classification",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["eng-Latn"],
        main_score="map",
        date=None,
        form=None,
        domains=None,
        license=None,
        task_subtypes=None,
        socioeconomic_status=None,
        annotations_creators=None,
        dialect=None,
        text_creation=None,
        bibtex_citation="",
        avg_character_length=None,
        n_samples=None,
    )

    with pytest.raises(ValueError):
        TaskMetadata(**metadata_kwargs)
def test_given_none_revision_path_then_it_logs_warning(caplog):
    """An explicit ``revision=None`` is accepted but emits exactly one warning."""
    metadata_kwargs = dict(
        name="MyTask",
        dataset={"path": "test/dataset", "revision": None},
        description="testing",
        reference=None,
        type="Classification",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["eng-Latn"],
        main_score="map",
        date=None,
        form=None,
        domains=None,
        license=None,
        task_subtypes=None,
        socioeconomic_status=None,
        annotations_creators=None,
        dialect=None,
        text_creation=None,
        bibtex_citation="",
        avg_character_length=None,
        n_samples=None,
    )

    with caplog.at_level(logging.WARNING):
        my_task = TaskMetadata(**metadata_kwargs)

    assert my_task.dataset["revision"] is None

    warnings = [record for record in caplog.records if record.levelname == "WARNING"]
    assert len(warnings) == 1
    # Must match the library's emitted message byte-for-byte (typos included).
    assert (
        warnings[0].message == "Revision missing for the dataset test/dataset. "
        "It is encourage to specify a dataset revision for reproducability."
    )
def test_unfilled_metadata_is_not_filled():
    """Metadata with only the mandatory fields reports ``is_filled()`` as False."""
    sparse_metadata = TaskMetadata(
        name="MyTask",
        dataset={"path": "test/dataset", "revision": "1.0"},
        description="testing",
        reference=None,
        type="Classification",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["eng-Latn"],
        main_score="map",
        date=None,
        form=None,
        domains=None,
        license=None,
        task_subtypes=None,
        socioeconomic_status=None,
        annotations_creators=None,
        dialect=None,
        text_creation=None,
        bibtex_citation="",
        avg_character_length=None,
        n_samples=None,
    )

    # Identity check on purpose: is_filled() must return the bool False itself.
    assert sparse_metadata.is_filled() is False
def test_filled_metadata_is_filled():
    """Metadata with every optional field populated reports ``is_filled()`` as True."""
    complete_metadata = TaskMetadata(
        name="MyTask",
        dataset={"path": "test/dataset", "revision": "1.0"},
        description="testing",
        reference="https://aclanthology.org/W19-6138/",
        type="Classification",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["eng-Latn"],
        main_score="map",
        date=("2021-01-01", "2021-12-31"),
        form=["written"],
        domains=["Non-fiction"],
        license="mit",
        task_subtypes=["Thematic clustering"],
        socioeconomic_status="high",
        annotations_creators="expert-annotated",
        dialect=[],
        text_creation="found",
        bibtex_citation="Someone et al",
        avg_character_length={"train": 1},
        n_samples={"train": 1},
    )

    # Identity check on purpose: is_filled() must return the bool True itself.
    assert complete_metadata.is_filled() is True
def test_all_metadata_is_filled():
    """Every registered task has fully filled metadata.

    Tasks listed in ``_HISTORIC_DATASETS`` are grandfathered and skipped;
    any other task whose ``metadata.is_filled()`` is falsy fails the test.

    Raises:
        ValueError: listing every non-historic task with incomplete metadata.
    """
    # Hoist the exemption list into a set once: membership checks become O(1)
    # instead of scanning the ~200-entry list for every registered task.
    historic = set(_HISTORIC_DATASETS)

    unfilled_metadata = [
        task.metadata.name
        for task in get_tasks()
        if task.metadata.name not in historic and not task.metadata.is_filled()
    ]
    if unfilled_metadata:
        raise ValueError(
            f"The metadata of the following datasets is not filled: {unfilled_metadata}"
        )