diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/NorwegianCourtsBitextMining.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/NorwegianCourtsBitextMining.py new file mode 100644 index 0000000000000000000000000000000000000000..a0a67b202d54dc9c5417f071e767601c9a45e68b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/NorwegianCourtsBitextMining.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskBitextMining +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class NorwegianCourtsBitextMining(AbsTaskBitextMining): + metadata = TaskMetadata( + name="NorwegianCourtsBitextMining", + dataset={ + "path": "kardosdrur/norwegian-courts", + "revision": "d79af07e969a6678fcbbe819956840425816468f", + }, + description="Nynorsk and Bokmål parallel corpus from Norwegian courts. Norwegian courts have two standardised written languages. Bokmål is a variant closer to Danish, while Nynorsk was created to resemble regional dialects of Norwegian.", + reference="https://opus.nlpl.eu/index.php", + type="BitextMining", + category="s2s", + eval_splits=["test"], + eval_langs=["nob-Latn", "nno-Latn"], + main_score="f1", + date=("2020-01-01", "2020-12-31"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="CC BY 4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" +@inproceedings{opus4, + title={OPUS-MT — Building open translation services for the World}, + author={Tiedemann, J{\"o}rg and Thottingal, Santhosh}, + booktitle={Proceedings of the 22nd Annual Conference of the European Association for Machine Translation (EAMT)}, + year={2020} +} +""", + n_samples={"test": 2050}, + avg_character_length={"test": 1884.0}, + ) + + def dataset_transform(self): + # Convert to standard format + self.dataset = self.dataset.rename_column("nb", "sentence1") + self.dataset = self.dataset.rename_column("nn", "sentence2") diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/norwegian_courts_bitext_mining.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/norwegian_courts_bitext_mining.py new file mode 100644 index 0000000000000000000000000000000000000000..bd6a1f3a31a9ef571afeb799e2f9073766a69ca9 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/norwegian_courts_bitext_mining.py @@ -0,0 +1,36 @@ +from mteb.abstasks import AbsTaskBitextMining +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class NorwegianCourtsBitextMining(AbsTaskBitextMining): + metadata = TaskMetadata( + name="NorwegianCourtsBitextMining", + dataset={ + "path": "kardosdrur/norwegian-courts", + "revision": "d79af07e969a6678fcbbe819956840425816468f", + }, + description="Nynorsk and Bokmål parallel corpus from Norwegian courts. 
", + reference="https://opus.nlpl.eu/ELRC-Courts_Norway-v1.php", + type="BitextMining", + category="s2s", + eval_splits=["test"], + eval_langs=["nno-Latn", "nob-Latn"], + main_score="accuracy", + date=("2000-01-01", "2020-12-31"), # approximate guess + form=["spoken"], + domains=["Spoken", "Legal"], + task_subtypes=["Dialect pairing"], + license="openUnder-PSI", + socioeconomic_status="high", + annotations_creators="derived", # best guess + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": 456}, + avg_character_length={"test": 82.11}, + ) + + def dataset_transform(self) -> None: + # Convert to standard format + self.dataset = self.dataset.rename_column("nb", "sentence1") + self.dataset = self.dataset.rename_column("nn", "sentence2") diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/srn/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/srn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/VieMedEVBitextMining.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/VieMedEVBitextMining.py new file mode 100644 index 0000000000000000000000000000000000000000..3d3196a8920c2b643cdf6f1558592bb6f725a1f1 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/VieMedEVBitextMining.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import random + +import datasets + +from mteb.abstasks import AbsTaskBitextMining +from mteb.abstasks.TaskMetadata import TaskMetadata + +TEST_SAMPLES = 2048 + + +class VieMedEVBitextMining(AbsTaskBitextMining): + metadata = TaskMetadata( + name="VieMedEVBitextMining", + dataset={ + "path": "nhuvo/MedEV", + "revision": "d03c69413bc53d1cea5a5375b3a953c4fee35ecd", + "trust_remote_code": True, + }, + description="A high-quality Vietnamese-English parallel data from the medical domain for machine translation", + reference="https://aclanthology.org/2015.iwslt-evaluation.11/", + type="BitextMining", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn", "vie-Latn"], + main_score="f1", + date=("2024-08-28", "2022-03-28"), + form=["written"], + domains=["Medical"], + task_subtypes=[], + license="cc-by-nc", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="human-translated and localized", + bibtex_citation="""@inproceedings{medev, + title = {{Improving Vietnamese-English Medical Machine Translation}}, + author = {Nhu Vo and Dat Quoc Nguyen and Dung D. Le and Massimo Piccardi and Wray Buntine}, + booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING)}, + year = {2024} +}""", + n_samples={"test": TEST_SAMPLES}, + avg_character_length={"test": 139.23}, + ) + + def dataset_transform(self): + # Convert to standard format + ds = {} + seed = 42 + random.seed(seed) + # Get all texts + all_texts = self.dataset["test"]["text"] + + # Determine the midpoint of the list + mid_index = len(all_texts) // 2 + # Pairs are in two halves + en_sentences = all_texts[:mid_index] + vie_sentences = all_texts[mid_index:] + assert len(en_sentences) == len( + vie_sentences + ), "The split does not result in equal halves." 
+ + # Downsample + indices = list(range(len(en_sentences))) + random.shuffle(indices) + sample_indices = indices[:TEST_SAMPLES] + en_sentences = [en_sentences[i] for i in sample_indices] + vie_sentences = [vie_sentences[i] for i in sample_indices] + assert ( + len(en_sentences) == len(vie_sentences) == TEST_SAMPLES + ), f"Exceeded {TEST_SAMPLES} samples for 'test' split." + + # Return dataset + ds["test"] = datasets.Dataset.from_dict( + {"sentence1": vie_sentences, "sentence2": en_sentences} + ) + self.dataset = datasets.DatasetDict(ds) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bfee0a6ae15055499d2734ecd898b556c3e9d347 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/__init__.py @@ -0,0 +1,133 @@ +from __future__ import annotations + +from .ara.AJGT import * +from .ara.HotelReviewSentimentClassification import * +from .ara.OnlineStoreReviewSentimentClassification import * +from .ara.RestaurantReviewSentimentClassification import * +from .ara.TweetEmotionClassification import * +from .ara.TweetSarcasmClassification import * +from .ben.BengaliDocumentClassification import * +from .ben.BengaliHateSpeechClassification import * +from .ben.BengaliSentimentAnalysis import * +from .bul.BulgarianStoreReviewSentimentClassfication import * +from .ces.CSFDCZMovieReviewSentimentClassification import * +from .ces.CzechProductReviewSentimentClassification import * +from .ces.CzechSoMeSentimentClassification import * +from .ces.CzechSubjectivityClassification import * +from .dan.AngryTweetsClassification import * +from .dan.DanishPoliticalCommentsClassification import * +from .dan.DKHateClassification import * +from .dan.LccSentimentClassification import * +from .deu.GermanPoliticiansTwitterSentimentClassification import * +from .deu.TenKGnadClassification import * +from .ell.GreekLegalCodeClassification import * +from .eng.AmazonPolarityClassification import * +from .eng.ArxivClassification import * +from .eng.Banking77Classification import * +from .eng.DBpediaClassification import * +from .eng.EmotionClassification import * +from .eng.FinancialPhrasebankClassification import * +from .eng.FrenkEnClassification import * +from .eng.ImdbClassification import * +from .eng.LegalBenchClassification import * +from .eng.NewsClassification import * +from .eng.PatentClassification import * +from .eng.PoemSentimentClassification import * +from .eng.ToxicChatClassification import * +from .eng.ToxicConversationsClassification import * +from .eng.TweetSentimentExtractionClassification import * +from .eng.TweetTopicSingleClassification import * +from .eng.YahooAnswersTopicsClassification import * +from .eng.YelpReviewFullClassification import * +from .est.estonian_valence import * +from .fas.PersianFoodSentimentClassification import * +from .fil.FilipinoHateSpeechClassification import * +from .fil.FilipinoShopeeReviewsClassification import * +from .fin.FinToxicityClassification import * +from .fra.FrenchBookReviews import * +from .fra.MovieReviewSentimentClassification import * +from .guj.GujaratiNewsClassification import * +from 
.heb.HebrewSentimentAnalysis import * +from .hin.HindiDiscourseClassification import * +from .hin.SentimentAnalysisHindi import * +from .hrv.FrenkHrClassification import * +from .ind.IndonesianIdClickbaitClassification import * +from .ind.IndonesianMongabayConservationClassification import * +from .ita.ItaCaseholdClassification import * +from .ita.ItalianLinguistAcceptabilityClassification import * +from .jav.JavaneseIMDBClassification import * +from .jpn.WRIMEClassification import * +from .kan.KannadaNewsClassification import * +from .kor.KlueTC import * +from .kor.KorFin import * +from .kor.KorHateClassification import * +from .kor.KorSarcasmClassification import * +from .kur.KurdishSentimentClassification import * +from .mal.MalayalamNewsClassification import * +from .mar.MarathiNewsClassification import * +from .mkd.MacedonianTweetSentimentClassification import * +from .multilingual.AfriSentiClassification import * +from .multilingual.AfriSentiLangClassification import * +from .multilingual.AmazonCounterfactualClassification import * +from .multilingual.AmazonReviewsClassification import * +from .multilingual.CataloniaTweetClassification import * +from .multilingual.CyrillicTurkicLangClassification import * +from .multilingual.HinDialectClassification import * +from .multilingual.IndicLangClassification import * +from .multilingual.IndicNLPNewsClassification import * +from .multilingual.IndicSentimentClassification import * +from .multilingual.LanguageClassification import * +from .multilingual.MasakhaNEWSClassification import * +from .multilingual.MassiveIntentClassification import * +from .multilingual.MassiveScenarioClassification import * +from .multilingual.MTOPDomainClassification import * +from .multilingual.MTOPIntentClassification import * +from .multilingual.MultiHateClassification import * +from .multilingual.MultilingualSentimentClassification import * +from .multilingual.NaijaSenti import * +from .multilingual.NordicLangClassification import * +from .multilingual.NusaXSenti import * +from .multilingual.ScalaClassification import * +from .multilingual.SIB200Classification import * +from .multilingual.SouthAfricanLangClassification import * +from .multilingual.SwissJudgementClassification import * +from .multilingual.TurkicClassification import * +from .multilingual.TweetSentimentClassification import * +from .mya.MyanmarNews import * +from .nep.NepaliNewsClassification import * +from .nld.DutchBookReviewSentimentClassification import * +from .nob.NoRecClassification import * +from .nob.NorwegianParliamentClassification import * +from .ory.OdiaNewsClassification import * +from .pan.PunjabiNewsClassification import * +from .pol.PolishClassification import * +from .por.HateSpeechPortugueseClassification import * +from .ron.Moroco import * +from .ron.RomanianReviewsSentiment import * +from .ron.RomanianSentimentClassification import * +from .san.SanskritShlokasClassification import * +from .sin.SinhalaNewsClassification import * +from .sin.SinhalaNewsSourceClassification import * +from .slk.CSFDSKMovieReviewSentimentClassification import * +from .slv.FrenkSlClassification import * +from .spa.SpanishNewsClassification import * +from .spa.SpanishSentimentClassification import * +from .ssw.SiswatiNewsClassification import * +from .svk.SlovakMovieReviewSentimentClassification import * +from .swe.DalajClassification import * +from .swe.SwedishSentimentClassification import * +from .swe.SweRecClassification import * +from .tam.TamilNewsClassification import * +from 
.tel.TeluguAndhraJyotiNewsClassification import * +from .tha.WisesightSentimentClassification import * +from .tsn.TswanaNewsClassification import * +from .tur.TurkishMovieSentimentClassification import * +from .tur.TurkishProductSentimentClassification import * +from .ukr.UkrFormalityClassification import * +from .urd.UrduRomanSentimentClassification import * +from .vie.VieStudentFeedbackClassification import * +from .zho.CMTEBClassification import * +from .zho.YueOpenriceReviewClassification import ( + YueOpenriceReviewClassification, # noqa: F401 +) +from .zul.IsiZuluNewsClassification import * diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/AJGT.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/AJGT.py new file mode 100644 index 0000000000000000000000000000000000000000..910ddd180c84f098577fc2564efa3d0796919e1b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/AJGT.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class AJGT(AbsTaskClassification): + metadata = TaskMetadata( + name="AJGT", + dataset={ + "path": "komari6/ajgt_twitter_ar", + "revision": "af3f2fa5462ac461b696cb300d66e07ad366057f", + }, + description="Arabic Jordanian General Tweets (AJGT) Corpus consists of 1,800 tweets annotated as positive or negative, written in Modern Standard Arabic (MSA) or Jordanian dialect.", + reference="https://link.springer.com/chapter/10.1007/978-3-319-60042-0_66/", + type="Classification", + category="s2s", + eval_splits=["train"], + eval_langs=["ara-Arab"], + main_score="accuracy", + date=("2021-01-01", "2022-01-25"), + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="AFL", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=["ara-arab-MSA", "ara-arab-JO"], + text_creation="found", + bibtex_citation=""" +@inproceedings{alomari2017arabic, + title={Arabic tweets sentimental analysis using machine learning}, + author={Alomari, Khaled Mohammad and ElSherif, Hatem M and Shaalan, Khaled}, + booktitle={International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems}, + pages={602--610}, + year={2017}, + organization={Springer} +} +""", + n_samples={"train": 1800}, + avg_character_length={"train": 46.81}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/HotelReviewSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/HotelReviewSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..2e52ab91fe5dc8da5b9c406e4a597c9bc4070804 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/HotelReviewSentimentClassification.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 2048 + + +class HotelReviewSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="HotelReviewSentimentClassification", + dataset={ + "path": "Elnagara/hard", + "revision": "b108d2c32ee4e1f4176ea233e1a5ac17bceb9ef9", + }, + description="HARD is a dataset of Arabic hotel reviews collected from the Booking.com website.", + reference="https://link.springer.com/chapter/10.1007/978-3-319-67056-0_3", + type="Classification", + category="s2s",
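+ # HARD appears to ship only a single "train" split on the Hub, so that + # split is evaluated directly; dataset_transform below downsamples it to + # N_SAMPLES examples via stratified_subsampling. +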
eval_splits=["train"], + eval_langs=["ara-Arab"], + main_score="accuracy", + date=("2016-06-01", "2016-07-31"), + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=["ara-arab-EG", "ara-arab-JO", "ara-arab-LB", "ara-arab-SA"], + text_creation="found", + bibtex_citation=""" +@article{elnagar2018hotel, + title={Hotel Arabic-reviews dataset construction for sentiment analysis applications}, + author={Elnagar, Ashraf and Khalifa, Yasmin S and Einea, Anas}, + journal={Intelligent natural language processing: Trends and applications}, + pages={35--52}, + year={2018}, + publisher={Springer} +} +""", + n_samples={"train": N_SAMPLES}, + avg_character_length={"train": 137.2}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["train"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/TweetSarcasmClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/TweetSarcasmClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..18cdc321964c2ac8129821b0c4f21d86d9ca677c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/TweetSarcasmClassification.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TweetSarcasmClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TweetSarcasmClassification", + dataset={ + "path": "iabufarha/ar_sarcasm", + "revision": "557bf94ac6177cc442f42d0b09b6e4b76e8f47c9", + }, + description="Arabic sarcasm detection dataset, which was created through the reannotation of available Arabic sentiment analysis datasets.", + reference="https://aclanthology.org/2020.osact-1.5/", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["ara-Arab"], + main_score="accuracy", + date=("2020-01-01", "2021-01-01"), + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="MIT", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=["ara-arab-EG", "ara-arab-LB", "ara-arab-MA", "ara-arab-SA"], + text_creation="found", + bibtex_citation=""" +@inproceedings{abu-farha-magdy-2020-arabic, + title = "From {A}rabic Sentiment Analysis to Sarcasm Detection: The {A}r{S}arcasm Dataset", + author = "Abu Farha, Ibrahim and + Magdy, Walid", + editor = "Al-Khalifa, Hend and + Magdy, Walid and + Darwish, Kareem and + Elsayed, Tamer and + Mubarak, Hamdy", + booktitle = "Proceedings of the 4th Workshop on Open-Source Arabic Corpora and Processing Tools, with a Shared Task on Offensive Language Detection", + month = may, + year = "2020", + address = "Marseille, France", + publisher = "European Language Resource Association", + url = "https://aclanthology.org/2020.osact-1.5", + pages = "32--39", + abstract = "Sarcasm is one of the main challenges for sentiment analysis systems. Its complexity comes from the expression of opinion using implicit indirect phrasing. In this paper, we present ArSarcasm, an Arabic sarcasm detection dataset, which was created through the reannotation of available Arabic sentiment analysis datasets. The dataset contains 10,547 tweets, 16{\%} of which are sarcastic. In addition to sarcasm the data was annotated for sentiment and dialects. 
Our analysis shows the highly subjective nature of these tasks, which is demonstrated by the shift in sentiment labels based on annotators{'} biases. Experiments show the degradation of state-of-the-art sentiment analysers when faced with sarcastic content. Finally, we train a deep learning model for sarcasm detection using BiLSTM. The model achieves an F1 score of 0.46, which shows the challenging nature of the task, and should act as a basic baseline for future research on our dataset.", + language = "English", + ISBN = "979-10-95546-51-1", +} +""", + n_samples={"test": 2110}, + avg_character_length={"test": 102.1}, + ) + + def dataset_transform(self): + # labels: 0 non-sarcastic, 1 sarcastic + self.dataset = self.dataset.rename_columns( + {"tweet": "text", "sarcasm": "label"} + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliDocumentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliDocumentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..120b09986827d5caf850145b8ee393df3e6830bc --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliDocumentClassification.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class BengaliDocumentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="BengaliDocumentClassification", + description="Dataset for News Classification, categorized with 13 domains.", + reference="https://aclanthology.org/2023.eacl-main.4", + dataset={ + "path": "dialect-ai/shironaam", + "revision": "1c6e67433da618073295b7c90f1c55fa8e78f35c", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["ben-Beng"], + main_score="accuracy", + date=("2022-05-01", "2023-05-01"), + form=["written"], + dialect=[], + domains=["News"], + task_subtypes=[], + license="CC BY-NC-SA 4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + text_creation="found", + bibtex_citation=""" + @inproceedings{akash-etal-2023-shironaam, + title = "Shironaam: {B}engali News Headline Generation using Auxiliary Information", + author = "Akash, Abu Ubaida and + Nayeem, Mir Tafseer and + Shohan, Faisal Tareque and + Islam, Tanvir", + booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics", + month = may, + year = "2023", + address = "Dubrovnik, Croatia", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2023.eacl-main.4", + pages = "52--67" + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 1658.1}, + ) + + def dataset_transform(self) -> None: + self.dataset = self.dataset.rename_columns( + {"article": "text", "category": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliHateSpeechClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliHateSpeechClassification.py new file mode 100644 
index 0000000000000000000000000000000000000000..162bb71e47d8cf69b4b326b9034b73781a09786d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliHateSpeechClassification.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class BengaliHateSpeechClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="BengaliHateSpeechClassification", + description="The Bengali Hate Speech Dataset is a Bengali-language dataset of news articles collected from various Bengali media sources and categorized based on the type of hate in the text.", + reference="https://huggingface.co/datasets/bn_hate_speech", + dataset={ + "path": "rezacsedu/bn_hate_speech", + "revision": "99612296bc093f0720cac7d7cbfcb67eecf1ca2f", + }, + type="Classification", + category="s2s", + eval_splits=["train"], + eval_langs=["ben-Beng"], + main_score="f1", + date=("2019-12-01", "2020-04-09"), + form=["written"], + dialect=[], + domains=["News"], + task_subtypes=["Sentiment/Hate speech"], + license="MIT", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + text_creation="found", + bibtex_citation="""@inproceedings{karim2020BengaliNLP, + title={Classification Benchmarks for Under-resourced Bengali Language based on Multichannel Convolutional-LSTM Network}, + author={Karim, Md. Rezaul and Chakravarti, Bharathi Raja and P. McCrae, John and Cochez, Michael}, + booktitle={7th IEEE International Conference on Data Science and Advanced Analytics (IEEE DSAA,2020)}, + publisher={IEEE}, + year={2020} +} +""", + n_samples={"train": 3418}, + avg_character_length={"train": 103.42}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["train"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliSentimentAnalysis.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliSentimentAnalysis.py new file mode 100644 index 0000000000000000000000000000000000000000..f26ba98e72879c9f8447243d86463abc4834bfaf --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliSentimentAnalysis.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class BengaliSentimentAnalysis(AbsTaskClassification): + metadata = TaskMetadata( + name="BengaliSentimentAnalysis", + description="The dataset contains 3,307 negative and 8,500 positive reviews collected and manually annotated from YouTube Bengali drama.", + reference="https://data.mendeley.com/datasets/p6zc7krs37/4", + dataset={ + "path": "Akash190104/bengali_sentiment_analysis", + "revision": "a4b3685b1854cc26c554dda4c7cb918a36a6fb6c", + }, + type="Classification", + category="s2s", + eval_splits=["train"], + eval_langs=["ben-Beng"], + main_score="f1", + date=("2020-06-24", "2020-11-26"), + form=["written"], + dialect=[], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="CC BY 4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + text_creation="found", + bibtex_citation="""@inproceedings{sazzed2020cross, + title={Cross-lingual sentiment classification in low-resource Bengali language}, + author={Sazzed, Salim}, + booktitle={Proceedings of the Sixth Workshop on Noisy
User-generated Text (W-NUT 2020)}, + pages={50--60}, + year={2020} + }""", + n_samples={"train": 11807}, + avg_character_length={"train": 69.66}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["train"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/BulgarianStoreReviewSentimentClassfication.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/BulgarianStoreReviewSentimentClassfication.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1de1037e960852ade89daa6c5cf8d2367c6085 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/BulgarianStoreReviewSentimentClassfication.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class BulgarianStoreReviewSentimentClassfication(AbsTaskClassification): + metadata = TaskMetadata( + name="BulgarianStoreReviewSentimentClassfication", + description="Bulgarian online store review dataset for sentiment classification.", + reference="https://doi.org/10.7910/DVN/TXIK9P", + dataset={ + "path": "artist/Bulgarian-Online-Store-Feedback-Text-Analysis", + "revision": "701984d6c6efea0e14a1c7850ef70e464c5577c0", + }, + type="Classification", + category="s2s", + date=("2018-05-14", "2018-05-14"), + eval_splits=["test"], + eval_langs=["bul-Cyrl"], + main_score="accuracy", + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="cc-by-4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@data{DVN/TXIK9P_2018, +author = {Georgieva-Trifonova, Tsvetanka and Stefanova, Milena and Kalchev, Stefan}, +publisher = {Harvard Dataverse}, +title = {{Dataset for ``Customer Feedback Text Analysis for Online Stores Reviews in Bulgarian''}}, +year = {2018}, +version = {V1}, +doi = {10.7910/DVN/TXIK9P}, +url = {https://doi.org/10.7910/DVN/TXIK9P} +} +""", + n_samples={"test": 182}, + avg_character_length={"test": 316.7}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"Review": "text", "Category": "label"} + ) + + labels = self.dataset["train"]["label"] + lab2idx = {lab: idx for idx, lab in enumerate(sorted(set(labels)))} + + self.dataset = self.dataset.map( + lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CSFDCZMovieReviewSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CSFDCZMovieReviewSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..6fa03f38453c3c92818b4d563c4c027e63ac273f --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CSFDCZMovieReviewSentimentClassification.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 2048 + + +class CSFDCZMovieReviewSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CSFDCZMovieReviewSentimentClassification", + description="The dataset contains 30k user reviews from csfd.cz in Czech.", + reference="https://arxiv.org/abs/2304.01922", + dataset={ + "path": "fewshot-goes-multilingual/cs_csfd-movie-reviews", + "revision": "dd2ede6faaea338ef6b1e2966f06808656975a23", + }, + type="Classification", + category="s2s", + date=("2002-06-28", "2020-03-13"), + eval_splits=["test"], + eval_langs=["ces-Latn"], + main_score="accuracy", + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="CC-BY-SA-4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" +@misc{štefánik2023resources, + title={Resources and Few-shot Learners for In-context Learning in Slavic Languages}, + author={Michal Štefánik and Marek Kadlčík and Piotr Gramacki and Petr Sojka}, + year={2023}, + eprint={2304.01922}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +""", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 386.5}, + ) + + @property + def metadata_dict(self): + md = super().metadata_dict + # Increase the samples_per_label in order to improve baseline performance + md["samples_per_label"] = 20 + return md + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"comment": "text", "rating_int": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"], n_samples=N_SAMPLES + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechProductReviewSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechProductReviewSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..d4670d67468639546ae4555bb318f990f5b1c36f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechProductReviewSentimentClassification.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class CzechProductReviewSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CzechProductReviewSentimentClassification", + description="User reviews of products on Czech e-shop Mall.cz with 3 sentiment classes (positive, neutral, negative)", + reference="https://aclanthology.org/W13-1609/", + dataset={ + "path": "fewshot-goes-multilingual/cs_mall-product-reviews", + "revision": "2e6fedf42c9c104e83dfd95c3a453721e683e244", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["ces-Latn"], + main_score="accuracy", + date=("2013-01-01", "2013-06-01"), + form=["written"], + dialect=[], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="CC BY-NC-SA 4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + text_creation="found", + bibtex_citation=""" + @inproceedings{habernal-etal-2013-sentiment, + title = "Sentiment Analysis in {C}zech Social Media Using Supervised Machine 
Learning", + author = "Habernal, Ivan and + Pt{\'a}{\v{c}}ek, Tom{\'a}{\v{s}} and + Steinberger, Josef", + editor = "Balahur, Alexandra and + van der Goot, Erik and + Montoyo, Andres", + booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", + month = jun, + year = "2013", + address = "Atlanta, Georgia", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/W13-1609", + pages = "65--74", + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 153.26}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict + + def dataset_transform(self) -> None: + self.dataset = self.dataset.rename_columns( + {"comment": "text", "rating_str": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSoMeSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSoMeSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..c456bc66199afb493eb0c87d7daefb684c721e99 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSoMeSentimentClassification.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class CzechSoMeSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CzechSoMeSentimentClassification", + description="User comments on Facebook", + reference="https://aclanthology.org/W13-1609/", + dataset={ + "path": "fewshot-goes-multilingual/cs_facebook-comments", + "revision": "6ced1d87a030915822b087bf539e6d5c658f1988", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["ces-Latn"], + main_score="accuracy", + date=("2013-01-01", "2013-06-01"), + form=["written"], + dialect=[], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="CC BY-NC-SA 4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + text_creation="found", + bibtex_citation=""" + @inproceedings{habernal-etal-2013-sentiment, + title = "Sentiment Analysis in {C}zech Social Media Using Supervised Machine Learning", + author = "Habernal, Ivan and + Pt{\'a}{\v{c}}ek, Tom{\'a}{\v{s}} and + Steinberger, Josef", + editor = "Balahur, Alexandra and + van der Goot, Erik and + Montoyo, Andres", + booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", + month = jun, + year = "2013", + address = "Atlanta, Georgia", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/W13-1609", + pages = "65--74", + } + """, + n_samples={"test": 1000}, + avg_character_length={"test": 59.89}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict + + def dataset_transform(self) -> None: + self.dataset = self.dataset.rename_columns( + {"comment": "text", "sentiment_int": "label"} + ) diff --git 
a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSubjectivityClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSubjectivityClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..3c6aef029f063a667edd8d7262ffb869d94cef3c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSubjectivityClassification.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class CzechSubjectivityClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CzechSubjectivityClassification", + description="A Czech dataset for subjectivity classification.", + reference="https://arxiv.org/abs/2009.08712", + dataset={ + "path": "pauli31/czech-subjectivity-dataset", + "revision": "e387ddf167f3eba99936cff89909ed6264f17e1f", + }, + type="Classification", + category="s2s", + date=("2022-04-01", "2022-04-01"), + eval_splits=["validation", "test"], + eval_langs=["ces-Latn"], + main_score="accuracy", + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{priban-steinberger-2022-czech, + title = "{C}zech Dataset for Cross-lingual Subjectivity Classification", + author = "P{\v{r}}ib{\'a}{\v{n}}, Pavel and + Steinberger, Josef", + booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference", + month = jun, + year = "2022", + address = "Marseille, France", + publisher = "European Language Resources Association", + url = "https://aclanthology.org/2022.lrec-1.148", + pages = "1381--1391", +} +""", + n_samples={"validation": 500, "test": 2000}, + avg_character_length={"validation": 108.2, "test": 108.3}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/AngryTweetsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/AngryTweetsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..55632c36b12f11376a0a624d3414f0d358c1929b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/AngryTweetsClassification.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class AngryTweetsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="AngryTweetsClassification", + dataset={ + "path": "DDSC/angry-tweets", + "revision": "20b0e6081892e78179356fada741b7afa381443d", + }, + description="A sentiment dataset with 3 classes (positiv, negativ, neutral) for Danish tweets", + reference="https://aclanthology.org/2021.nodalida-main.53/", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["dan-Latn"], + main_score="accuracy", + date=("2021-01-01", "2021-12-31"), + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="CC-BY-4.0",
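+ # Note: "positiv"/"negativ"/"neutral" in the description are the dataset's + # own Danish label names, not misspellings. +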
socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{pauli2021danlp, + title={DaNLP: An open-source toolkit for Danish Natural Language Processing}, + author={Pauli, Amalie Brogaard and Barrett, Maria and Lacroix, Oph{\'e}lie and Hvingelby, Rasmus}, + booktitle={Proceedings of the 23rd Nordic Conference on Computational Linguistics (NoDaLiDa)}, + pages={460--466}, + year={2021} +}""", + n_samples={"test": 1050}, + avg_character_length={"test": 156.1}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DKHateClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DKHateClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..e32ffe4d0cb95aba1dda726fdb4d232d67a285c5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DKHateClassification.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class DKHateClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="DKHateClassification", + dataset={ + "path": "DDSC/dkhate", + "revision": "59d12749a3c91a186063c7d729ec392fda94681c", + }, + description="Danish Tweets annotated for Hate Speech either being Offensive or not", + reference="https://aclanthology.org/2020.lrec-1.430/", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["dan-Latn"], + main_score="accuracy", + date=("2018-01-01", "2018-12-31"), + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="CC-BY-4.0", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{sigurbergsson-derczynski-2020-offensive, + title = "Offensive Language and Hate Speech Detection for {D}anish", + author = "Sigurbergsson, Gudbjartur Ingi and + Derczynski, Leon", + editor = "Calzolari, Nicoletta and + B{\'e}chet, Fr{\'e}d{\'e}ric and + Blache, Philippe and + Choukri, Khalid and + Cieri, Christopher and + Declerck, Thierry and + Goggi, Sara and + Isahara, Hitoshi and + Maegaard, Bente and + Mariani, Joseph and + Mazo, H{\'e}l{\`e}ne and + Moreno, Asuncion and + Odijk, Jan and + Piperidis, Stelios", + booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference", + month = may, + year = "2020", + address = "Marseille, France", + publisher = "European Language Resources Association", + url = "https://aclanthology.org/2020.lrec-1.430", + pages = "3498--3508", + abstract = "The presence of offensive language on social media platforms and the implications this poses is becoming a major concern in modern society. Given the enormous amount of content created every day, automatic methods are required to detect and deal with this type of content. Until now, most of the research has focused on solving the problem for the English language, while the problem is multilingual. 
We construct a Danish dataset DKhate containing user-generated comments from various social media platforms, and to our knowledge, the first of its kind, annotated for various types and target of offensive language. We develop four automatic classification systems, each designed to work for both the English and the Danish language. In the detection of offensive language in English, the best performing system achieves a macro averaged F1-score of 0.74, and the best performing system for Danish achieves a macro averaged F1-score of 0.70. In the detection of whether or not an offensive post is targeted, the best performing system for English achieves a macro averaged F1-score of 0.62, while the best performing system for Danish achieves a macro averaged F1-score of 0.73. Finally, in the detection of the target type in a targeted offensive post, the best performing system for English achieves a macro averaged F1-score of 0.56, and the best performing system for Danish achieves a macro averaged F1-score of 0.63. Our work for both the English and the Danish language captures the type and targets of offensive language, and present automatic methods for detecting different kinds of offensive language such as hate speech and cyberbullying.", + language = "English", + ISBN = "979-10-95546-34-4", +}""", + n_samples={"test": 329}, + avg_character_length={"test": 104.0}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = dict(self.metadata) + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict + + def dataset_transform(self): + # convert label to a 0/1 label + labels = self.dataset["train"]["label"] # type: ignore + lab2idx = {lab: idx for idx, lab in enumerate(set(labels))} + self.dataset = self.dataset.map( + lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DanishPoliticalCommentsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DanishPoliticalCommentsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..8906e8c00bae957ee8f90eddf4a69d2524339f8a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DanishPoliticalCommentsClassification.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class DanishPoliticalCommentsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="DanishPoliticalCommentsClassification", + dataset={ + "path": "danish_political_comments", + "revision": "edbb03726c04a0efab14fc8c3b8b79e4d420e5a1", + }, + description="A dataset of Danish political comments rated for sentiment", + reference="https://huggingface.co/datasets/danish_political_comments", + type="Classification", + category="s2s", + eval_splits=["train"], + eval_langs=["dan-Latn"], + main_score="accuracy", + date=( + "2000-01-01", + "2022-12-31", + ), # Estimated range for the collection of comments + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"train": 9010}, + avg_character_length={"train": 69.9}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = dict(self.metadata) + 
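# Intent sketch (the exact mechanics live in AbsTaskClassification): each of + # the n_experiments runs trains the downstream classifier on samples_per_label + # examples per class, and the reported score is averaged over those runs. +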
metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("sentence", "text") + self.dataset = self.dataset.rename_column("target", "label") + + # create train and test splits + self.dataset = self.dataset["train"].train_test_split(0.2, seed=self.seed) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DdiscoCohesionClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DdiscoCohesionClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..63674cb4a5bc434446b73e5253ec4a2014ee0114 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DdiscoCohesionClassification.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class DdiscoCohesionClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="Ddisco", + dataset={ + "path": "DDSC/ddisco", + "revision": "514ab557579fcfba538a4078d6d647248a0e6eb7", + }, + description="A Danish Discourse dataset with values for coherence and source (Wikipedia or Reddit)", + reference="https://aclanthology.org/2022.lrec-1.260/", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["dan-Latn"], + main_score="accuracy", + date=("2021-01-01", "2022-06-25"), + form=["written"], + domains=["Non-fiction", "Social"], + dialect=[], + task_subtypes=["Discourse coherence"], + license="cc-by-sa-3.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + text_creation="found", + bibtex_citation=""" + @inproceedings{flansmose-mikkelsen-etal-2022-ddisco, + title = "{DD}is{C}o: A Discourse Coherence Dataset for {D}anish", + author = "Flansmose Mikkelsen, Linea and + Kinch, Oliver and + Jess Pedersen, Anders and + Lacroix, Oph{\'e}lie", + editor = "Calzolari, Nicoletta and + B{\'e}chet, Fr{\'e}d{\'e}ric and + Blache, Philippe and + Choukri, Khalid and + Cieri, Christopher and + Declerck, Thierry and + Goggi, Sara and + Isahara, Hitoshi and + Maegaard, Bente and + Mariani, Joseph and + Mazo, H{\'e}l{\`e}ne and + Odijk, Jan and + Piperidis, Stelios", + booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference", + month = jun, + year = "2022", + address = "Marseille, France", + publisher = "European Language Resources Association", + url = "https://aclanthology.org/2022.lrec-1.260", + pages = "2440--2445", + abstract = "To date, there has been no resource for studying discourse coherence on real-world Danish texts. Discourse coherence has mostly been approached with the assumption that incoherent texts can be represented by coherent texts in which sentences have been shuffled. However, incoherent real-world texts rarely resemble that. We thus present DDisCo, a dataset including text from the Danish Wikipedia and Reddit annotated for discourse coherence. We choose to annotate real-world texts instead of relying on artificially incoherent text for training and testing models. 
Then, we evaluate the performance of several methods, including neural networks, on the dataset.", +} + """, + n_samples=None, + avg_character_length=None, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns({"rating": "label"}).remove_columns( + ["domain"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/LccSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/LccSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..17c674b9f0a7e50b9f91abaced40856330062402 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/LccSentimentClassification.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class LccSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LccSentimentClassification", + dataset={ + "path": "DDSC/lcc", + "revision": "de7ba3406ee55ea2cc52a0a41408fa6aede6d3c6", + }, + description="The leipzig corpora collection, annotated for sentiment", + reference="https://github.com/fnielsen/lcc-sentiment", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["dan-Latn"], + main_score="accuracy", + date=("2006-01-01", "2006-12-31"), + form=["written"], + domains=["News", "Web"], + task_subtypes=["Sentiment/Hate speech"], + license="CC-BY-4.0", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{quasthoff-etal-2006-corpus, + title = "Corpus Portal for Search in Monolingual Corpora", + author = "Quasthoff, Uwe and + Richter, Matthias and + Biemann, Christian", + editor = "Calzolari, Nicoletta and + Choukri, Khalid and + Gangemi, Aldo and + Maegaard, Bente and + Mariani, Joseph and + Odijk, Jan and + Tapias, Daniel", + booktitle = "Proceedings of the Fifth International Conference on Language Resources and Evaluation ({LREC}{'}06)", + month = may, + year = "2006", + address = "Genoa, Italy", + publisher = "European Language Resources Association (ELRA)", + url = "http://www.lrec-conf.org/proceedings/lrec2006/pdf/641_pdf.pdf", + abstract = "A simple and flexible schema for storing and presenting monolingual language resources is proposed. In this format, data for 18 different languages is already available in various sizes. The data is provided free of charge for online use and download. 
The main target is to ease the application of algorithms for monolingual and interlingual studies.", +}""", + n_samples={"test": 150}, + avg_character_length={"test": 118.7}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/GermanPoliticiansTwitterSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/GermanPoliticiansTwitterSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..338ec4314697084a01a5d68042fec2eddba2d0cf --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/GermanPoliticiansTwitterSentimentClassification.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class GermanPoliticiansTwitterSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="GermanPoliticiansTwitterSentimentClassification", + description="GermanPoliticiansTwitterSentiment is a dataset of German tweets categorized with their sentiment (3 classes).", + reference="https://aclanthology.org/2022.konvens-1.9", + dataset={ + "path": "Alienmaster/german_politicians_twitter_sentiment", + "revision": "65343b17f5a76227ab2e15b9424dfab6466ffcb1", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="accuracy", + date=("2021-01-01", "2021-12-31"), + form=["written"], + domains=["Social", "Government"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{schmidt-etal-2022-sentiment, + title = "Sentiment Analysis on {T}witter for the Major {G}erman Parties during the 2021 {G}erman Federal Election", + author = "Schmidt, Thomas and + Fehle, Jakob and + Weissenbacher, Maximilian and + Richter, Jonathan and + Gottschalk, Philipp and + Wolff, Christian", + editor = "Schaefer, Robin and + Bai, Xiaoyu and + Stede, Manfred and + Zesch, Torsten", + booktitle = "Proceedings of the 18th Conference on Natural Language Processing (KONVENS 2022)", + month = "12--15 " # sep, + year = "2022", + address = "Potsdam, Germany", + publisher = "KONVENS 2022 Organizers", + url = "https://aclanthology.org/2022.konvens-1.9", + pages = "74--87", + } + """, + n_samples={"test": 357}, + avg_character_length={"test": 302.48}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("majority_sentiment", "label") diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/TenKGnadClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/TenKGnadClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..11fc94786a7c1200033aec231ca244f690b8faa5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/TenKGnadClassification.py @@ -0,0 +1,45 @@ +from __future__ 
import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class TenKGnadClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TenKGnadClassification", + description="10k German News Articles Dataset (10kGNAD) contains news articles from the website of the Austrian newspaper DER Standard, with their topic classification (9 classes).", + reference="https://tblock.github.io/10kGNAD/", + dataset={ + "path": "gnad10", + "revision": "0798affe9b3f88cfda4267b6fbc50fac67046ee5", + }, + type="Classification", + category="p2p", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="accuracy", + date=("2015-06-01", "2016-05-31"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="cc-by-nc-sa-4.0", + socioeconomic_status="medium", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @InProceedings{Schabus2017, + Author = {Dietmar Schabus and Marcin Skowron and Martin Trapp}, + Title = {One Million Posts: A Data Set of German Online Discussions}, + Booktitle = {Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR)}, + Pages = {1241--1244}, + Year = {2017}, + Address = {Tokyo, Japan}, + Doi = {10.1145/3077136.3080711}, + Month = aug + } + """, + n_samples={"test": 1028}, + avg_character_length={"test": 2627.31}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ell/GreekLegalCodeClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ell/GreekLegalCodeClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..d93bfe94932b70e2d691854350cf6aa9ed9dcc6d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ell/GreekLegalCodeClassification.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +TEST_SAMPLES = 2048 + + +class GreekLegalCodeClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="GreekLegalCodeClassification", + description="Greek Legal Code Dataset for Classification.
(subset = chapter)", + reference="https://arxiv.org/abs/2109.15298", + dataset={ + "path": "AI-team-UoA/greek_legal_code", + "revision": "de0fdb34424f07d1ac6f0ede23ee0ed44bd9f5d1", + "name": "chapter", + }, + type="Classification", + category="s2s", + date=("2021-01-01", "2021-01-01"), + eval_splits=["validation", "test"], + eval_langs=["ell-Grek"], + main_score="accuracy", + form=["written"], + domains=["Legal"], + task_subtypes=["Topic classification"], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{papaloukas-etal-2021-glc, + title = "Multi-granular Legal Topic Classification on Greek Legislation", + author = "Papaloukas, Christos and Chalkidis, Ilias and Athinaios, Konstantinos and Pantazi, Despina-Athanasia and Koubarakis, Manolis", + booktitle = "Proceedings of the Natural Legal Language Processing Workshop 2021", + year = "2021", + address = "Punta Cana, Dominican Republic", + publisher = "Association for Computational Linguistics", + url = "https://arxiv.org/abs/2109.15298", + doi = "10.48550/arXiv.2109.15298", + pages = "63--75" +} +""", + n_samples={"validation": TEST_SAMPLES, "test": TEST_SAMPLES}, + avg_character_length={"validation": 4046.8, "test": 4200.8}, + ) + + def dataset_transform(self): + self.dataset["validation"] = ( + self.dataset["validation"] + .shuffle(seed=self.seed) + .select(range(TEST_SAMPLES)) + ) + self.dataset["test"] = ( + self.dataset["test"].shuffle(seed=self.seed).select(range(TEST_SAMPLES)) + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/AmazonPolarityClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/AmazonPolarityClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..bc4344489d041a9b4f22e24d01d0b20a8dc18081 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/AmazonPolarityClassification.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class AmazonPolarityClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="AmazonPolarityClassification", + description="Amazon Polarity Classification Dataset.", + reference="https://huggingface.co/datasets/amazon_polarity", + dataset={ + "path": "mteb/amazon_polarity", + "revision": "e2d317d38cd51312af73b3d32a06d1a08b442046", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=( + "2012-01-01", + "2015-12-31", + ), # Estimated range for the collection of reviews + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{McAuley2013HiddenFA, + title={Hidden factors and hidden topics: understanding rating dimensions with review text}, + author={Julian McAuley and Jure Leskovec}, + journal={Proceedings of the 7th ACM conference on Recommender systems}, + year={2013}, + url={https://api.semanticscholar.org/CorpusID:6440341} +}""", + n_samples={"test": 400000}, + avg_character_length={"test": 431.4}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ArxivClassification.py 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ArxivClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..b1d4fd499030c1b9629c167f2c5580c113339e9d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ArxivClassification.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class ArxivClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ArxivClassification", + description="Classification Dataset of Arxiv Papers", + dataset={ + "path": "ccdv/arxiv-classification", + "revision": "f9bd92144ed76200d6eb3ce73a8bd4eba9ffdc85", + }, + reference="https://ieeexplore.ieee.org/document/8675939", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1998-11-11", "2019-03-28"), + form=["written"], + domains=["Academic"], + task_subtypes=["Topic classification"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@ARTICLE{8675939, + author={He, Jun and Wang, Liqun and Liu, Liu and Feng, Jiao and Wu, Hao}, + journal={IEEE Access}, + title={Long Document Classification From Local Word Glimpses via Recurrent Attention Learning}, + year={2019}, + volume={7}, + number={}, + pages={40707-40718}, + doi={10.1109/ACCESS.2019.2907992} + }""", + n_samples={"test": 2048}, + avg_character_length={}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/Banking77Classification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/Banking77Classification.py new file mode 100644 index 0000000000000000000000000000000000000000..f3ce59ad556e1e908fde01bf0befd312e3c88df9 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/Banking77Classification.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class Banking77Classification(AbsTaskClassification): + metadata = TaskMetadata( + name="Banking77Classification", + description="Dataset composed of online banking queries annotated with their corresponding intents.", + reference="https://arxiv.org/abs/2003.04807", + dataset={ + "path": "mteb/banking77", + "revision": "0fd18e25b25c072e09e0d92ab615fda904d66300", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=( + "2019-01-01", + "2019-12-31", + ), # Estimated range for the collection of queries + form=["written"], + domains=[], + task_subtypes=[], + license="MIT", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{casanueva-etal-2020-efficient, + title = "Efficient Intent Detection with Dual Sentence Encoders", + author = "Casanueva, I{\~n}igo and + Tem{\v{c}}inas, Tadas and + Gerz, Daniela and + Henderson, Matthew and + Vuli{\'c}, Ivan", + editor = "Wen, Tsung-Hsien and + Celikyilmaz, Asli and + Yu, Zhou and + Papangelis, Alexandros and + Eric, Mihail and + Kumar, Anuj and + Casanueva, I{\~n}igo and + Shah, Rushin", + booktitle = "Proceedings of the 2nd Workshop on Natural Language Processing for Conversational AI", + month = jul, + year = "2020", + address = "Online", + publisher = 
"Association for Computational Linguistics", + url = "https://aclanthology.org/2020.nlp4convai-1.5", + doi = "10.18653/v1/2020.nlp4convai-1.5", + pages = "38--45", +}""", + n_samples={"test": 3080}, + avg_character_length={"test": 54.2}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/DBpediaClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/DBpediaClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..3db1016d04b42a8beae8c4d9798f04aff62e3e9b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/DBpediaClassification.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class DBpediaClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="DBpediaClassification", + description="DBpedia14 is a dataset of English texts from Wikipedia articles, categorized into 14 non-overlapping classes based on their DBpedia ontology.", + reference="https://arxiv.org/abs/1509.01626", + dataset={ + "path": "fancyzhx/dbpedia_14", + "revision": "9abd46cf7fc8b4c64290f26993c540b92aa145ac", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-01-25", "2022-01-25"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Topic classification"], + license="cc-by-sa-3.0", + socioeconomic_status="low", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{NIPS2015_250cf8b5, + author = {Zhang, Xiang and Zhao, Junbo and LeCun, Yann}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {C. Cortes and N. Lawrence and D. Lee and M. Sugiyama and R. 
Garnett}, + pages = {}, + publisher = {Curran Associates, Inc.}, + title = {Character-level Convolutional Networks for Text Classification}, + url = {https://proceedings.neurips.cc/paper_files/paper/2015/file/250cf8b51c773f3f8dc8b4be867a9a02-Paper.pdf}, + volume = {28}, + year = {2015} + } + """, + n_samples={"test": 70000}, + avg_character_length={"test": 281.40}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("content", "text") + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["train", "test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/EmotionClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/EmotionClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..70eb8a27799b531431b1051866eb4e89a2792e6e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/EmotionClassification.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class EmotionClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="EmotionClassification", + description="Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise.", + reference="https://www.aclweb.org/anthology/D18-1404", + dataset={ + "path": "mteb/emotion", + "revision": "4f58c6b202a23cf9a4da393831edf4f9183cad37", + }, + type="Classification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=( + "2017-01-01", + "2018-12-31", + ), # Estimated range for the collection of Twitter messages + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{saravia-etal-2018-carer, + title = "{CARER}: Contextualized Affect Representations for Emotion Recognition", + author = "Saravia, Elvis and + Liu, Hsien-Chi Toby and + Huang, Yen-Hao and + Wu, Junlin and + Chen, Yi-Shin", + editor = "Riloff, Ellen and + Chiang, David and + Hockenmaier, Julia and + Tsujii, Jun{'}ichi", + booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", + month = oct # "-" # nov, + year = "2018", + address = "Brussels, Belgium", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/D18-1404", + doi = "10.18653/v1/D18-1404", + pages = "3687--3697", + abstract = "Emotions are expressed in nuanced ways, which varies by collective or individual experiences, knowledge, and beliefs. Therefore, to understand emotion, as conveyed through text, a robust mechanism capable of capturing and modeling different linguistic nuances and phenomena is needed. We propose a semi-supervised, graph-based algorithm to produce rich structural descriptors which serve as the building blocks for constructing contextualized affect representations from text. The pattern-based representations are further enriched with word embeddings and evaluated through several emotion recognition tasks. 
Our experimental results demonstrate that the proposed method outperforms state-of-the-art techniques on emotion recognition tasks.", +}""", + n_samples={"validation": 2000, "test": 2000}, + avg_character_length={"validation": 95.3, "test": 95.6}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FinancialPhrasebankClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FinancialPhrasebankClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..e272e4a889c8f9f4687f3177fb0cf0669e81ebf3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FinancialPhrasebankClassification.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class FinancialPhrasebankClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="FinancialPhrasebankClassification", + description="Polar sentiment dataset of sentences from financial news, categorized by sentiment into positive, negative, or neutral.", + reference="https://arxiv.org/abs/1307.5336", + dataset={ + "path": "takala/financial_phrasebank", + "revision": "1484d06fe7af23030c7c977b12556108d1f67039", + "name": "sentences_allagree", + }, + type="Classification", + category="s2s", + eval_splits=["train"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2013-11-01", "2013-11-01"), + form=["written"], + domains=["News"], + task_subtypes=["Sentiment/Hate speech"], + license="cc-by-nc-sa-3.0", + socioeconomic_status="medium", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @article{Malo2014GoodDO, + title={Good debt or bad debt: Detecting semantic orientations in economic texts}, + author={P. Malo and A. Sinha and P. Korhonen and J. Wallenius and P. 
Takala}, + journal={Journal of the Association for Information Science and Technology}, + year={2014}, + volume={65} + } + """, + n_samples={"train": 4840}, + avg_character_length={"train": 121.96}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("sentence", "text") diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FrenkEnClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FrenkEnClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..5553c799e43e598b8d3b696a6d4ca5058c0222b1 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FrenkEnClassification.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class FrenkEnClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="FrenkEnClassification", + description="English subset of the FRENK dataset", + dataset={ + "path": "classla/FRENK-hate-en", + "revision": "52483dba0ff23291271ee9249839865e3c3e7e50", + }, + reference="https://arxiv.org/abs/1906.02045", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2021-05-28", "2021-05-28"), + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="low", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{ljubešić2019frenk, + title={The FRENK Datasets of Socially Unacceptable Discourse in Slovene and English}, + author={Nikola Ljubešić and Darja Fišer and Tomaž Erjavec}, + year={2019}, + eprint={1906.02045}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/1906.02045} + }""", + n_samples={"test": 2300}, + avg_character_length={"test": 188.75}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ImdbClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ImdbClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8f84e37598de776b57b85802e30a99f2e4f6fa --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ImdbClassification.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class ImdbClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ImdbClassification", + description="Large Movie Review Dataset", + dataset={ + "path": "mteb/imdb", + "revision": "3d86128a09e091d6018b6d26cad27f2739fc2db7", + }, + reference="http://www.aclweb.org/anthology/P11-1015", + type="Classification", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=( + "2000-01-01", + "2010-12-31", + ), # Estimated range for the collection of movie reviews + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{maas-etal-2011-learning, + title = "Learning Word Vectors for Sentiment Analysis", + author = "Maas, Andrew L. and + Daly, Raymond E. and + Pham, Peter T. and + Huang, Dan and + Ng, Andrew Y. 
and + Potts, Christopher", + editor = "Lin, Dekang and + Matsumoto, Yuji and + Mihalcea, Rada", + booktitle = "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", + month = jun, + year = "2011", + address = "Portland, Oregon, USA", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/P11-1015", + pages = "142--150", +}""", + n_samples={"test": 25000}, + avg_character_length={"test": 1293.8}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/LegalBenchClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/LegalBenchClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..37925f4d5e281e5026cb1c7873fcd670a36916f5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/LegalBenchClassification.py @@ -0,0 +1,6427 @@ +from __future__ import annotations + +from typing import Any + +import datasets +from datasets import concatenate_datasets + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class CanadaTaxCourtOutcomesLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CanadaTaxCourtOutcomesLegalBenchClassification", + description="The input is an excerpt of text from Tax Court of Canada decisions involving appeals of tax related matters. The task is to classify whether the excerpt includes the outcome of the appeal, and if so, to specify whether the appeal was allowed or dismissed. Partial success (e.g. appeal granted on one tax year but dismissed on another) counts as allowed (with the exception of costs orders which are disregarded). Where the excerpt does not clearly articulate an outcome, the system should indicate other as the outcome. Categorizing case outcomes is a common task that legal researchers complete in order to gather datasets involving outcomes in legal processes for the purposes of quantitative empirical legal research.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "canada_tax_court_outcomes", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2023-08-23", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=["en-CA"], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }""", + n_samples={"test": 244}, + avg_character_length={"test": 622.60}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLIConfidentialityOfAgreementLegalBenchClassification( + AbsTaskClassification +): + metadata = TaskMetadata( + name="ContractNLIConfidentialityOfAgreementLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party shall not disclose the fact that Agreement was agreed or negotiated.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_confidentiality_of_agreement", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 82}, + avg_character_length={"test": 473.17}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLIExplicitIdentificationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ContractNLIExplicitIdentificationLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that all Confidential Information shall be expressly identified by the Disclosing Party.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_explicit_identification", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 109}, + avg_character_length={"test": 506.12}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLIInclusionOfVerballyConveyedInformationLegalBenchClassification( + AbsTaskClassification +): + metadata = TaskMetadata( + name="ContractNLIInclusionOfVerballyConveyedInformationLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that Confidential Information may include verbally conveyed information.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_inclusion_of_verbally_conveyed_information", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 139}, + avg_character_length={"test": 525.75}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLILimitedUseLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ContractNLILimitedUseLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party shall not use any Confidential Information for any purpose other than the purposes stated in Agreement.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_limited_use", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 208}, + avg_character_length={"test": 407.51}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLINoLicensingLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ContractNLINoLicensingLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Agreement shall not grant Receiving Party any right to Confidential Information.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_no_licensing", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 162}, + avg_character_length={"test": 419.42}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLINoticeOnCompelledDisclosureLegalBenchClassification( + AbsTaskClassification +): + metadata = TaskMetadata( + name="ContractNLINoticeOnCompelledDisclosureLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party shall notify Disclosing Party in case Receiving Party is required by law, regulation or judicial process to disclose any Confidential Information.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_notice_on_compelled_disclosure", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 142}, + avg_character_length={"test": 503.45}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLIPermissibleAcquirementOfSimilarInformationLegalBenchClassification( + AbsTaskClassification +): + metadata = TaskMetadata( + name="ContractNLIPermissibleAcquirementOfSimilarInformationLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party may acquire information similar to Confidential Information from a third party.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_permissible_acquirement_of_similar_information", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 178}, + avg_character_length={"test": 427.40}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLIPermissibleCopyLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ContractNLIPermissibleCopyLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party may create a copy of some Confidential Information in some circumstances.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_permissible_copy", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 87}, + avg_character_length={"test": 386.84}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLIPermissibleDevelopmentOfSimilarInformationLegalBenchClassification( + AbsTaskClassification +): + metadata = TaskMetadata( + name="ContractNLIPermissibleDevelopmentOfSimilarInformationLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party may independently develop information similar to Confidential Information.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_permissible_development_of_similar_information", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 136}, + avg_character_length={"test": 396.40}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLIPermissiblePostAgreementPossessionLegalBenchClassification( + AbsTaskClassification +): + metadata = TaskMetadata( + name="ContractNLIPermissiblePostAgreementPossessionLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party may retain some Confidential Information even after the return or destruction of Confidential Information.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_permissible_post-agreement_possession", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 111}, + avg_character_length={"test": 529.09}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLIReturnOfConfidentialInformationLegalBenchClassification( + AbsTaskClassification +): + metadata = TaskMetadata( + name="ContractNLIReturnOfConfidentialInformationLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party shall destroy or return some Confidential Information upon the termination of Agreement.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_return_of_confidential_information", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 66}, + avg_character_length={"test": 478.29}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLISharingWithEmployeesLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ContractNLISharingWithEmployeesLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party may share some Confidential Information with some of Receiving Party's employees.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_sharing_with_employees", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 170}, + avg_character_length={"test": 548.63}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLISharingWithThirdPartiesLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ContractNLISharingWithThirdPartiesLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that the Receiving Party may share some Confidential Information with some third-parties (including consultants, agents and professional advisors).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_sharing_with_third-parties", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 180}, + avg_character_length={"test": 517.29}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class ContractNLISurvivalOfObligationsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ContractNLISurvivalOfObligationsLegalBenchClassification", + description="This task is a subset of ContractNLI, and consists of determining whether a clause from an NDA provides that some obligations of Agreement may survive termination of Agreement.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "contract_nli_survival_of_obligations", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1996-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{koreeda2021contractnli, + title={ContractNLI: A dataset for document-level natural language inference for contracts}, + author={Koreeda, Yuta and Manning, Christopher D}, + journal={arXiv preprint arXiv:2110.01799}, + year={2021} + }""", + n_samples={"test": 157}, + avg_character_length={"test": 417.64}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CorporateLobbyingLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CorporateLobbyingLegalBenchClassification", + description="The Corporate Lobbying task consists of determining whether a proposed Congressional bill may be relevant to a company based on a company's self-description in its SEC 10K filing.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "corporate_lobbying", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2023-08-23", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + """, + n_samples={"test": 490}, + avg_character_length={"test": 6039.85}, + ) + + def dataset_transform(self): + # Map "yes"/"no" relevance answers to 0/1 integer labels (unknown values pass through unchanged) + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + # Join the bill and company fields into a single "text" column, since the + # classification task expects one input text per example + self.dataset = self.dataset.map( + lambda examples: { + "text": examples["bill_title"] + + "\n\n" + + examples["bill_summary"] + + "\n\n" + + examples["company_name"] + + "\n\n" + + examples["company_description"] + } + ) + self.dataset = self.dataset.remove_columns( + ["bill_title", "bill_summary", "company_name", "company_description"] + ) + + +class CUADAffiliateLicenseLicenseeLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADAffiliateLicenseLicenseeLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if a clause describes a license grant to a licensee (incl. sublicensor) and the affiliates of such licensee/sublicensor.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_affiliate_license-licensee", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 198}, + avg_character_length={"test": 484.11}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADAffiliateLicenseLicensorLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADAffiliateLicenseLicensorLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause describes a license grant by affiliates of the licensor or that includes intellectual property of affiliates of the licensor.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_affiliate_license-licensor", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 88}, + avg_character_length={"test": 633.40}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADAntiAssignmentLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADAntiAssignmentLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause requires consent or notice of a party if the contract is assigned to a third party.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_anti-assignment", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 1172}, + avg_character_length={"test": 340.81}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADAuditRightsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADAuditRightsLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause gives a party the right to audit the books, records, or physical locations of the counterparty to ensure compliance with the contract.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_audit_rights", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 1216}, + avg_character_length={"test": 337.14}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADCapOnLiabilityLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADCapOnLiabilityLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies a cap on liability upon the breach of a party's obligation. This includes time limitation for the counterparty to bring claims or maximum amount for recovery.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_cap_on_liability", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 1246}, + avg_character_length={"test": 375.74}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADChangeOfControlLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADChangeOfControlLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause gives one party the right to terminate, or requires consent or notice of the counterparty, if such party undergoes a change of control, such as a merger, stock sale, transfer of all or substantially all of its assets or business, or assignment by operation of law.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_change_of_control", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 416}, + avg_character_length={"test": 391.96}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADCompetitiveRestrictionExceptionLegalBenchClassification( + AbsTaskClassification +): + metadata = TaskMetadata( + name="CUADCompetitiveRestrictionExceptionLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause mentions exceptions or carveouts to Non-Compete, Exclusivity and No-Solicit of Customers.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_competitive_restriction_exception", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 220}, + avg_character_length={"test": 433.04}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADCovenantNotToSueLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADCovenantNotToSueLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies that a party is restricted from contesting the validity of the counterparty's ownership of intellectual property or otherwise bringing a claim against the counterparty for matters unrelated to the contract.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_covenant_not_to_sue", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 308}, + avg_character_length={"test": 402.97}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADEffectiveDateLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADEffectiveDateLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies the date upon which the agreement becomes effective.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_effective_date", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 236}, + avg_character_length={"test": 277.62}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADExclusivityLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADExclusivityLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies exclusive dealing commitment with the counterparty. This includes a commitment to procure all 'requirements' from one party of certain technology, goods, or services or a prohibition on licensing or selling technology, goods or services to third parties, or a prohibition on collaborating or working with other parties, whether during the contract or after the contract ends (or both).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_exclusivity", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 762}, + avg_character_length={"test": 369.17}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADExpirationDateLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADExpirationDateLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies the date upon which the initial term expires.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_expiration_date", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 876}, + avg_character_length={"test": 309.27}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADGoverningLawLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADGoverningLawLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies which state/country’s law governs the contract.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_governing_law", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 876}, + avg_character_length={"test": 289.87}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADInsuranceLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADInsuranceLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause creates a requirement for insurance that must be maintained by one party for the benefit of the counterparty.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_insurance", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 1030}, + avg_character_length={"test": 365.54}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADIPOwnershipAssignmentLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADIPOwnershipAssignmentLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies that intellectual property created by one party becomes the property of the counterparty, either per the terms of the contract or upon the occurrence of certain events.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_ip_ownership_assignment", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 576}, + avg_character_length={"test": 414.00}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADIrrevocableOrPerpetualLicenseLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADIrrevocableOrPerpetualLicenseLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies a license grant that is irrevocable or perpetual.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_irrevocable_or_perpetual_license", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 280}, + avg_character_length={"test": 473.40}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADJointIPOwnershipLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADJointIPOwnershipLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause provides for joint or shared ownership of intellectual property between the parties to the contract.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_joint_ip_ownership", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 192}, + avg_character_length={"test": 374.17}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADLicenseGrantLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADLicenseGrantLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause contains a license granted by one party to its counterparty.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_license_grant", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 1396}, + avg_character_length={"test": 409.89}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADLiquidatedDamagesLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADLiquidatedDamagesLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause awards either party liquidated damages for breach or a fee upon the termination of a contract (termination fee).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_liquidated_damages", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 220}, + avg_character_length={"test": 351.76}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADMinimumCommitmentLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADMinimumCommitmentLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies a minimum order size or minimum amount or units per time period that one party must buy from the counterparty.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_minimum_commitment", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 772}, + avg_character_length={"test": 364.16}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADMostFavoredNationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADMostFavoredNationLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies that, should a third party get better terms on the licensing or sale of technology/goods/services described in the contract, the buyer of such technology/goods/services under the contract shall be entitled to those better terms.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_most_favored_nation", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 64}, + avg_character_length={"test": 418.75}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADNoSolicitOfCustomersLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADNoSolicitOfCustomersLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause restricts a party from contracting or soliciting customers or partners of the counterparty, whether during the contract or after the contract ends (or both).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_no-solicit_of_customers", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 84}, + avg_character_length={"test": 392.89}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADNoSolicitOfEmployeesLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADNoSolicitOfEmployeesLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause restricts a party from soliciting or hiring employees and/or contractors from the counterparty, whether during the contract or after the contract ends (or both).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_no-solicit_of_employees", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 142}, + avg_character_length={"test": 417.94}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADNonCompeteLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADNonCompeteLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause restricts the ability of a party to compete with the counterparty or operate in a certain geography or business or technology sector.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_non-compete", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 442}, + avg_character_length={"test": 383.20}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADNonDisparagementLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADNonDisparagementLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause requires a party not to disparage the counterparty.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_non-disparagement", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 100}, + avg_character_length={"test": 403.08}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADNonTransferableLicenseLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADNonTransferableLicenseLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause limits the ability of a party to transfer the license being granted to a third party.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_non-transferable_license", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 542}, + avg_character_length={"test": 399.16}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADNoticePeriodToTerminateRenewalLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADNoticePeriodToTerminateRenewalLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies a notice period required to terminate renewal.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_notice_period_to_terminate_renewal", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 222}, + avg_character_length={"test": 354.85}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADPostTerminationServicesLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADPostTerminationServicesLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause subjects a party to obligations after the termination or expiration of a contract, including any post-termination transition, payment, transfer of IP, wind-down, last-buy, or similar commitments.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_post-termination_services", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 808}, + avg_character_length={"test": 422.53}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADPriceRestrictionsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADPriceRestrictionsLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause places a restriction on the ability of a party to raise or reduce prices of technology, goods, or services provided.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_price_restrictions", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 46}, + avg_character_length={"test": 324.71}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADRenewalTermLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADRenewalTermLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies a renewal term.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_renewal_term", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+ year={2023},
+ eprint={2308.11462},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ },
+ @article{hendrycks2021cuad,
+ title={Cuad: An expert-annotated nlp dataset for legal contract review},
+ author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer},
+ journal={arXiv preprint arXiv:2103.06268},
+ year={2021}
+ }
+ """,
+ n_samples={"test": 386},
+ avg_character_length={"test": 340.87},
+ )
+
+ def dataset_transform(self):
+ mapping = {"yes": 1, "no": 0}
+ self.dataset = self.dataset.map(
+ lambda example: {
+ "answer": mapping.get(example["answer"].lower(), example["answer"])
+ }
+ )
+ self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class CUADRevenueProfitSharingLegalBenchClassification(AbsTaskClassification):
+ metadata = TaskMetadata(
+ name="CUADRevenueProfitSharingLegalBenchClassification",
+ description="This task was constructed from the CUAD dataset. It consists of determining if the clause requires a party to share revenue or profit with the counterparty for any technology, goods, or services.",
+ reference="https://huggingface.co/datasets/nguha/legalbench",
+ dataset={
+ "path": "nguha/legalbench",
+ "name": "cuad_revenue-profit_sharing",
+ "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+ },
+ type="Classification",
+ category="s2s",
+ eval_splits=["test"],
+ eval_langs=["eng-Latn"],
+ main_score="accuracy",
+ date=("2000-01-01", "2023-08-23"),
+ form=["written"],
+ domains=["Legal"],
+ task_subtypes=[],
+ license="cc-by-4.0",
+ socioeconomic_status="high",
+ annotations_creators="expert-annotated",
+ dialect=[],
+ text_creation="found",
+ bibtex_citation="""
+ @misc{guha2023legalbench,
+ title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+ author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+ year={2023},
+ eprint={2308.11462},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ },
+ @article{hendrycks2021cuad,
+ title={Cuad: An expert-annotated nlp dataset for legal contract review},
+ author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer},
+ journal={arXiv preprint arXiv:2103.06268},
+ year={2021}
+ }
+ """,
+ n_samples={"test": 774},
+ avg_character_length={"test": 371.55},
+ )
+
+ def dataset_transform(self):
+ mapping = {"yes": 1, "no": 0}
+ self.dataset = self.dataset.map(
+ lambda example: {
+ "answer": mapping.get(example["answer"].lower(), example["answer"])
+ }
+ )
+ self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class CUADRofrRofoRofnLegalBenchClassification(AbsTaskClassification):
+ metadata = TaskMetadata(
+ name="CUADRofrRofoRofnLegalBenchClassification",
+ description="This task was constructed from the CUAD dataset. It consists of determining if the clause grants one party a right of first refusal, right of first offer or right of first negotiation to purchase, license, market, or distribute equity interest, technology, assets, products or services.",
+ reference="https://huggingface.co/datasets/nguha/legalbench",
+ dataset={
+ "path": "nguha/legalbench",
+ "name": "cuad_rofr-rofo-rofn",
+ "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+ },
+ type="Classification",
+ category="s2s",
+ eval_splits=["test"],
+ eval_langs=["eng-Latn"],
+ main_score="accuracy",
+ date=("2000-01-01", "2023-08-23"),
+ form=["written"],
+ domains=["Legal"],
+ task_subtypes=[],
+ license="cc-by-4.0",
+ socioeconomic_status="high",
+ annotations_creators="expert-annotated",
+ dialect=[],
+ text_creation="found",
+ bibtex_citation="""
+ @misc{guha2023legalbench,
+ title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+ author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 690}, + avg_character_length={"test": 395.46}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADSourceCodeEscrowLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADSourceCodeEscrowLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause requires one party to deposit its source code into escrow with a third party, which can be released to the counterparty upon the occurrence of certain events (bankruptcy, insolvency, etc.).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_source_code_escrow", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 118}, + avg_character_length={"test": 399.18}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADTerminationForConvenienceLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADTerminationForConvenienceLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies that one party can terminate this contract without cause (solely by giving a notice and allowing a waiting period to expire).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_termination_for_convenience", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+ year={2023},
+ eprint={2308.11462},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ },
+ @article{hendrycks2021cuad,
+ title={Cuad: An expert-annotated nlp dataset for legal contract review},
+ author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer},
+ journal={arXiv preprint arXiv:2103.06268},
+ year={2021}
+ }
+ """,
+ n_samples={"test": 430},
+ avg_character_length={"test": 326.30},
+ )
+
+ def dataset_transform(self):
+ mapping = {"yes": 1, "no": 0}
+ self.dataset = self.dataset.map(
+ lambda example: {
+ "answer": mapping.get(example["answer"].lower(), example["answer"])
+ }
+ )
+ self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class CUADThirdPartyBeneficiaryLegalBenchClassification(AbsTaskClassification):
+ metadata = TaskMetadata(
+ name="CUADThirdPartyBeneficiaryLegalBenchClassification",
+ description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies that there is a non-contracting party who is a beneficiary to some or all of the clauses in the contract and therefore can enforce its rights against a contracting party.",
+ reference="https://huggingface.co/datasets/nguha/legalbench",
+ dataset={
+ "path": "nguha/legalbench",
+ "name": "cuad_third_party_beneficiary",
+ "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+ },
+ type="Classification",
+ category="s2s",
+ eval_splits=["test"],
+ eval_langs=["eng-Latn"],
+ main_score="accuracy",
+ date=("2000-01-01", "2023-08-23"),
+ form=["written"],
+ domains=["Legal"],
+ task_subtypes=[],
+ license="cc-by-4.0",
+ socioeconomic_status="high",
+ annotations_creators="expert-annotated",
+ dialect=[],
+ text_creation="found",
+ bibtex_citation="""
+ @misc{guha2023legalbench,
+ title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+ author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+ year={2023},
+ eprint={2308.11462},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ },
+ @article{hendrycks2021cuad,
+ title={Cuad: An expert-annotated nlp dataset for legal contract review},
+ author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer},
+ journal={arXiv preprint arXiv:2103.06268},
+ year={2021}
+ }
+ """,
+ n_samples={"test": 68},
+ avg_character_length={"test": 261.04},
+ )
+
+ def dataset_transform(self):
+ mapping = {"yes": 1, "no": 0}
+ self.dataset = self.dataset.map(
+ lambda example: {
+ "answer": mapping.get(example["answer"].lower(), example["answer"])
+ }
+ )
+ self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class CUADUncappedLiabilityLegalBenchClassification(AbsTaskClassification):
+ metadata = TaskMetadata(
+ name="CUADUncappedLiabilityLegalBenchClassification",
+ description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies that a party's liability is uncapped upon the breach of its obligation in the contract. This also includes uncapped liability for a particular type of breach such as IP infringement or breach of confidentiality obligation.",
+ reference="https://huggingface.co/datasets/nguha/legalbench",
+ dataset={
+ "path": "nguha/legalbench",
+ "name": "cuad_uncapped_liability",
+ "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+ },
+ type="Classification",
+ category="s2s",
+ eval_splits=["test"],
+ eval_langs=["eng-Latn"],
+ main_score="accuracy",
+ date=("2000-01-01", "2023-08-23"),
+ form=["written"],
+ domains=["Legal"],
+ task_subtypes=[],
+ license="cc-by-4.0",
+ socioeconomic_status="high",
+ annotations_creators="expert-annotated",
+ dialect=[],
+ text_creation="found",
+ bibtex_citation="""
+ @misc{guha2023legalbench,
+ title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+ author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 294}, + avg_character_length={"test": 441.04}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADUnlimitedAllYouCanEatLicenseLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADUnlimitedAllYouCanEatLicenseLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause grants one party an “enterprise,” “all you can eat” or unlimited usage license.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_unlimited-all-you-can-eat-license", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+ year={2023},
+ eprint={2308.11462},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ },
+ @article{hendrycks2021cuad,
+ title={Cuad: An expert-annotated nlp dataset for legal contract review},
+ author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer},
+ journal={arXiv preprint arXiv:2103.06268},
+ year={2021}
+ }
+ """,
+ n_samples={"test": 48},
+ avg_character_length={"test": 368.08},
+ )
+
+ def dataset_transform(self):
+ mapping = {"yes": 1, "no": 0}
+ self.dataset = self.dataset.map(
+ lambda example: {
+ "answer": mapping.get(example["answer"].lower(), example["answer"])
+ }
+ )
+ self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class CUADVolumeRestrictionLegalBenchClassification(AbsTaskClassification):
+ metadata = TaskMetadata(
+ name="CUADVolumeRestrictionLegalBenchClassification",
+ description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies a fee increase or consent requirement, etc. if one party's use of the product/services exceeds a certain threshold.",
+ reference="https://huggingface.co/datasets/nguha/legalbench",
+ dataset={
+ "path": "nguha/legalbench",
+ "name": "cuad_volume_restriction",
+ "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+ },
+ type="Classification",
+ category="s2s",
+ eval_splits=["test"],
+ eval_langs=["eng-Latn"],
+ main_score="accuracy",
+ date=("2000-01-01", "2023-08-23"),
+ form=["written"],
+ domains=["Legal"],
+ task_subtypes=[],
+ license="cc-by-4.0",
+ socioeconomic_status="high",
+ annotations_creators="expert-annotated",
+ dialect=[],
+ text_creation="found",
+ bibtex_citation="""
+ @misc{guha2023legalbench,
+ title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+ author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 322}, + avg_character_length={"test": 306.27}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class CUADWarrantyDurationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CUADWarrantyDurationLegalBenchClassification", + description="This task was constructed from the CUAD dataset. It consists of determining if the clause specifies a duration of any warranty against defects or errors in technology, products, or services provided under the contract.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "cuad_warranty_duration", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{hendrycks2021cuad, + title={Cuad: An expert-annotated nlp dataset for legal contract review}, + author={Hendrycks, Dan and Burns, Collin and Chen, Anya and Ball, Spencer}, + journal={arXiv preprint arXiv:2103.06268}, + year={2021} + } + """, + n_samples={"test": 320}, + avg_character_length={"test": 352.27}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class DefinitionClassificationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="DefinitionClassificationLegalBenchClassification", + description="This task consists of determining whether or not a sentence from a Supreme Court opinion offers a definition of a term.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "definition_classification", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 1337}, + avg_character_length={"test": 253.72}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class Diversity1LegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="Diversity1LegalBenchClassification", + description="Given a set of facts about the citizenships of plaintiffs and defendants and the amounts associated with claims, determine if the criteria for diversity jurisdiction have been met (variant 1).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "diversity_1", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 300}, + avg_character_length={"test": 103.21}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + # Map the boolean columns to readable plaintext + _diverse_parties_map = { + "True": "The parties are diverse.", + "False": "The parties are not diverse.", + } + + _amount_in_controversy_map = { + "True": "The Amount-in-controversy was met.", + "False": "The Amount-in-controversy was not met.", + } + + self.dataset = self.dataset.map( + lambda example: { + "text": example["text"] + + " " + + _diverse_parties_map[example["parties_are_diverse"]] + + " " + + _amount_in_controversy_map[example["aic_is_met"]] + } + ) + self.dataset = self.dataset.remove_columns( + ["parties_are_diverse", "aic_is_met"] + ) + + +class Diversity2LegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="Diversity2LegalBenchClassification", + description="Given a set of facts about the citizenships of plaintiffs and defendants and the amounts associated with claims, determine if the criteria for diversity jurisdiction have been met (variant 2).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "diversity_2", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 300}, + avg_character_length={"test": 0}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + # Map the boolean columns to readable plaintext + _diverse_parties_map = { + "True": "The parties are diverse.", + "False": "The parties are not diverse.", + } + + _amount_in_controversy_map = { + "True": "The Amount-in-controversy was met.", + "False": "The Amount-in-controversy was not met.", + } + + self.dataset = self.dataset.map( + lambda example: { + "text": example["text"] + + " " + + _diverse_parties_map[example["parties_are_diverse"]] + + " " + + _amount_in_controversy_map[example["aic_is_met"]] + } + ) + self.dataset = self.dataset.remove_columns( + ["parties_are_diverse", "aic_is_met"] + ) + + +class Diversity3LegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="Diversity3LegalBenchClassification", + description="Given a set of facts about the citizenships of plaintiffs and defendants and the amounts associated with claims, determine if the criteria for diversity jurisdiction have been met (variant 3).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "diversity_3", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 300}, + avg_character_length={"test": 135.46}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + # Map the boolean columns to readable plaintext + _diverse_parties_map = { + "True": "The parties are diverse.", + "False": "The parties are not diverse.", + } + + _amount_in_controversy_map = { + "True": "The Amount-in-controversy was met.", + "False": "The Amount-in-controversy was not met.", + } + + self.dataset = self.dataset.map( + lambda example: { + "text": example["text"] + + " " + + _diverse_parties_map[example["parties_are_diverse"]] + + " " + + _amount_in_controversy_map[example["aic_is_met"]] + } + ) + self.dataset = self.dataset.remove_columns( + ["parties_are_diverse", "aic_is_met"] + ) + + +class Diversity4LegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="Diversity4LegalBenchClassification", + description="Given a set of facts about the citizenships of plaintiffs and defendants and the amounts associated with claims, determine if the criteria for diversity jurisdiction have been met (variant 4).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "diversity_4", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 300}, + avg_character_length={"test": 144.52}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + # Map the boolean columns to readable plaintext + _diverse_parties_map = { + "True": "The parties are diverse.", + "False": "The parties are not diverse.", + } + + _amount_in_controversy_map = { + "True": "The Amount-in-controversy was met.", + "False": "The Amount-in-controversy was not met.", + } + + self.dataset = self.dataset.map( + lambda example: { + "text": example["text"] + + " " + + _diverse_parties_map[example["parties_are_diverse"]] + + " " + + _amount_in_controversy_map[example["aic_is_met"]] + } + ) + self.dataset = self.dataset.remove_columns( + ["parties_are_diverse", "aic_is_met"] + ) + + +class Diversity5LegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="Diversity5LegalBenchClassification", + description="Given a set of facts about the citizenships of plaintiffs and defendants and the amounts associated with claims, determine if the criteria for diversity jurisdiction have been met (variant 5).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "diversity_5", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 300}, + avg_character_length={"test": 174.77}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + # Map the boolean columns to readable plaintext + _diverse_parties_map = { + "True": "The parties are diverse.", + "False": "The parties are not diverse.", + } + + _amount_in_controversy_map = { + "True": "The Amount-in-controversy was met.", + "False": "The Amount-in-controversy was not met.", + } + + self.dataset = self.dataset.map( + lambda example: { + "text": example["text"] + + " " + + _diverse_parties_map[example["parties_are_diverse"]] + + " " + + _amount_in_controversy_map[example["aic_is_met"]] + } + ) + self.dataset = self.dataset.remove_columns( + ["parties_are_diverse", "aic_is_met"] + ) + + +class Diversity6LegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="Diversity6LegalBenchClassification", + description="Given a set of facts about the citizenships of plaintiffs and defendants and the amounts associated with claims, determine if the criteria for diversity jurisdiction have been met (variant 6).", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "diversity_6", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 300}, + avg_character_length={"test": 301.01}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + # Map the boolean columns to readable plaintext + _diverse_parties_map = { + "True": "The parties are diverse.", + "False": "The parties are not diverse.", + } + + _amount_in_controversy_map = { + "True": "The Amount-in-controversy was met.", + "False": "The Amount-in-controversy was not met.", + } + + self.dataset = self.dataset.map( + lambda example: { + "text": example["text"] + + " " + + _diverse_parties_map[example["parties_are_diverse"]] + + " " + + _amount_in_controversy_map[example["aic_is_met"]] + } + ) + self.dataset = self.dataset.remove_columns( + ["parties_are_diverse", "aic_is_met"] + ) + + +class FunctionOfDecisionSectionLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="FunctionOfDecisionSectionLegalBenchClassification", + description="""The task is to classify a paragraph extracted from a written court decision into one of seven possible categories: + 1. Facts - The paragraph describes the factual background that led up to the present lawsuit. + 2. Procedural History - The paragraph describes the course of litigation that led to the current proceeding before the court. + 3. Issue - The paragraph describes the legal or factual issue that must be resolved by the court. + 4. Rule - The paragraph describes a rule of law relevant to resolving the issue. + 5. Analysis - The paragraph analyzes the legal issue by applying the relevant legal principles to the facts of the present dispute. + 6. Conclusion - The paragraph presents a conclusion of the court. + 7. Decree - The paragraph constitutes a decree resolving the dispute. + """, + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "function_of_decision_section", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 367}, + avg_character_length={"test": 551.07}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("answer", "label") + + self.dataset = self.dataset.map( + lambda example: { + "text": example["Paragraph"] + + "\n\n" + + "Citation: " + + example["Citation"] + } + ) + + +class InsurancePolicyInterpretationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="InsurancePolicyInterpretationLegalBenchClassification", + description="Given an insurance claim and policy, determine whether the claim is covered by the policy.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "insurance_policy_interpretation", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 133}, + avg_character_length={"test": 521.88}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("answer", "label") + + self.dataset = self.dataset.map( + lambda example: { + "text": example["policy"] + "\n\n" + "Claim: " + example["claim"] + } + ) + + +class InternationalCitizenshipQuestionsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="InternationalCitizenshipQuestionsLegalBenchClassification", + description="Answer questions about citizenship law from across the world. 
Dataset was made using the GLOBALCIT citizenship law dataset, by constructing questions about citizenship law as Yes or No questions.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "international_citizenship_questions", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1960-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @misc{vink2023globalcit, + author = {Vink, Maarten and van der Baaren, Luuk and Bauböck, Rainer and Džankić, Jelena and Honohan, Iseult and Manby, Bronwen}, + title = {GLOBALCIT Citizenship Law Dataset, v2.0, Country-Year-Mode Data (Acquisition)}, + howpublished = {https://hdl.handle.net/1814/73190}, + year = {2023}, + publisher = {Global Citizenship Observatory} + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 206.18}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_columns( + {"question": "text", "answer": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) + + +class JCrewBlockerLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="JCrewBlockerLegalBenchClassification", + description="The J.Crew Blocker, also known as the J.Crew Protection, is a provision included in leveraged loan documents to prevent companies from removing security by transferring intellectual property (IP) into new subsidiaries and raising additional debt. 
The task consists of determining whether the J.Crew Blocker is present in the document.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "jcrew_blocker", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2016-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 54}, + avg_character_length={"test": 1092.22}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsBenefitsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsBenefitsLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's legal post discusses public benefits and social services that people can get from the government, like for food, disability, old age, housing, medical help, unemployment, child care, or other social needs.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_benefits", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M.
Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 66}, + avg_character_length={"test": 1308.44}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsBusinessLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsBusinessLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's legal question discusses issues faced by people who run small businesses or nonprofits, including around incorporation, licenses, taxes, regulations, and other concerns. It also includes options when there are disasters, bankruptcies, or other problems.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_business", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 174}, + avg_character_length={"test": 1144.51}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsConsumerLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsConsumerLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses issues people face regarding money, insurance, consumer goods and contracts, taxes, and small claims about quality of service.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_consumer", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 614}, + avg_character_length={"test": 1277.45}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsCourtsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsCourtsLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses the logistics of how a person can interact with a lawyer or the court system. It applies to situations about procedure, rules, how to file lawsuits, how to hire lawyers, how to represent oneself, and other practical matters about dealing with these systems.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_courts", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 192}, + avg_character_length={"test": 1171.02}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsCrimeLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsCrimeLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses issues in the criminal system including when people are charged with crimes, go to a criminal trial, go to prison, or are a victim of a crime.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_crime", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 688}, + avg_character_length={"test": 1212.90}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsDivorceLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsDivorceLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses issues around filing for divorce, separation, or annulment, getting spousal support, splitting money and property, and following the court processes.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_divorce", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 150}, + avg_character_length={"test": 1242.43}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsDomesticViolenceLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsDomesticViolenceLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses dealing with domestic violence and abuse, including getting protective orders, enforcing them, understanding abuse, reporting abuse, and getting resources and status if there is abuse.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_domestic_violence", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 174}, + avg_character_length={"test": 1360.83}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsEducationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsEducationLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses issues around school, including accommodations for special needs, discrimination, student debt, discipline, and other issues in education.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_education", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 56}, + avg_character_length={"test": 1397.44}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsEmploymentLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsEmploymentLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses issues related to working at a job, including discrimination and harassment, worker's compensation, workers rights, unions, getting paid, pensions, being fired, and more.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_employment", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 710}, + avg_character_length={"test": 1262.74}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsEstatesLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsEstatesLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses planning for end-of-life, possible incapacitation, and other special circumstances that would prevent a person from making decisions about their own well-being, finances, and property. This includes issues around wills, powers of attorney, advance directives, trusts, guardianships, conservatorships, and other estate issues that people and families deal with.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_estates", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 178}, + avg_character_length={"test": 1200.70}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsFamilyLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsFamilyLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses issues that arise within a family, like divorce, adoption, name change, guardianship, domestic violence, child custody, and other issues.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_family", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 1338.27}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) + + +class LearnedHandsHealthLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsHealthLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses issues with accessing health services, paying for medical care, getting public benefits for health care, protecting one's rights in medical settings, and other issues related to health.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_health", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 226}, + avg_character_length={"test": 1472.59}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsHousingLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsHousingLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses issues with paying your rent or mortgage, landlord-tenant issues, housing subsidies and public housing, eviction, and other problems with your apartment, mobile home, or house.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_housing", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 1322.54}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) + + +class LearnedHandsImmigrationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsImmigrationLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's post discusses visas, asylum, green cards, citizenship, migrant work and benefits, and other issues faced by people who are not full citizens in the US.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_immigration", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 134}, + avg_character_length={"test": 1216.31}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsTortsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsTortsLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's legal question discusses problems that one person has with another person (or animal), like when there is a car accident, a dog bite, bullying or possible harassment, or neighbors treating each other badly.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_torts", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 432}, + avg_character_length={"test": 1406.97}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LearnedHandsTrafficLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LearnedHandsTrafficLegalBenchClassification", + description="This is a binary classification task in which the model must determine if a user's legal post discusses problems with traffic and parking tickets, fees, driver's licenses, and other issues experienced with the traffic system. It also concerns issues with car accidents and injuries, cars' quality, repairs, purchases, and other contracts.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "learned_hands_traffic", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2022-05-21", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @dataset{learned_hands, + title = {LearnedHands Dataset}, + author = {{Suffolk University Law School} and {Stanford Legal Design Lab}}, + year = {2022}, + url = {https://spot.suffolklitlab.org/data/#learnedhands}, + note = {The LearnedHands dataset is licensed under CC BY-NC-SA 4.0}, + urldate = {2022-05-21} + } + """, + n_samples={"test": 556}, + avg_character_length={"test": 1182.91}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class LegalReasoningCausalityLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="LegalReasoningCausalityLegalBenchClassification", + description="Given an excerpt from a district court opinion, classify if it relies on statistical evidence in its reasoning.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "legal_reasoning_causality", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2020-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + """, + n_samples={"test": 55}, + avg_character_length={"test": 1563.76}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +_MAUD_DATASET_MAP = [ + { + "name": "maud_ability_to_consummate_concept_is_subject_to_mae_carveouts", + "filter_cols": [], + }, + { + "name": "maud_accuracy_of_fundamental_target_rws_bringdown_standard", + "filter_cols": [], + }, + { + "name": "maud_accuracy_of_target_capitalization_rw_(outstanding_shares)_bringdown_standard_answer", + "filter_cols": [], + }, + { + "name": "maud_accuracy_of_target_general_rw_bringdown_timing_answer", + "filter_cols": [], + }, + { + "name": "maud_additional_matching_rights_period_for_modifications_(cor)", + "filter_cols": [], + }, + { + "name": "maud_application_of_buyer_consent_requirement_(negative_interim_covenant)", + "filter_cols": [], + }, + { + "name": "maud_buyer_consent_requirement_(ordinary_course)", + "filter_cols": [], + }, + { + "name": "maud_change_in_law__subject_to_disproportionate_impact_modifier", + "filter_cols": [], + }, + { + "name": "maud_changes_in_gaap_or_other_accounting_principles__subject_to_disproportionate_impact_modifier", + "filter_cols": [], + }, + { + "name": "maud_cor_permitted_in_response_to_intervening_event", + "filter_cols": [], + }, + { + "name": "maud_cor_permitted_with_board_fiduciary_determination_only", + "filter_cols": [], + }, + { + "name": "maud_cor_standard_(intervening_event)", + "filter_cols": [], + }, + { + "name": "maud_cor_standard_(superior_offer)", + "filter_cols": [], + }, + { + "name": "maud_definition_contains_knowledge_requirement_-_answer", + "filter_cols": [], + }, + { + "name": "maud_definition_includes_asset_deals", + "filter_cols": [], + }, + { + "name": "maud_definition_includes_stock_deals", + "filter_cols": [], + }, + { + "name": "maud_fiduciary_exception__board_determination_standard", + "filter_cols": [], + }, + { + "name": "maud_fiduciary_exception_board_determination_trigger_(no_shop)", + "filter_cols": [], + }, + { + "name": "maud_financial_point_of_view_is_the_sole_consideration", + "filter_cols": [], + }, + { + "name": "maud_fls_(mae)_standard", + # The label "A" has only one example and the label "E" has only two examples, so we drop rows with them + "filter_cols": ["A", "E"], + }, + { + "name": "maud_general_economic_and_financial_conditions_subject_to_disproportionate_impact_modifier", + "filter_cols": [], + }, + { + "name": "maud_includes_consistent_with_past_practice", + "filter_cols": [], + }, + { + "name": "maud_initial_matching_rights_period_(cor)", + "filter_cols": [], + }, + { + "name": "maud_initial_matching_rights_period_(ftr)", + "filter_cols": [], + }, + { + "name": "maud_intervening_event_-_required_to_occur_after_signing_-_answer", + "filter_cols": [], + }, + { + "name": "maud_knowledge_definition", + "filter_cols": [], + }, + { + "name": "maud_liability_standard_for_no-shop_breach_by_target_non-do_representatives", + "filter_cols": [], + }, + { + "name":
"maud_ordinary_course_efforts_standard", + "filter_cols": [], + }, + { + "name": "maud_pandemic_or_other_public_health_event__subject_to_disproportionate_impact_modifier", + "filter_cols": [], + }, + { + "name": "maud_pandemic_or_other_public_health_event_specific_reference_to_pandemic-related_governmental_responses_or_measures", + "filter_cols": [], + }, + { + "name": "maud_relational_language_(mae)_applies_to", + "filter_cols": [], + }, + { + "name": "maud_specific_performance", + "filter_cols": [], + }, + { + "name": "maud_tail_period_length", + # The labels "A" and "D" have only two examples, so we drop rows with them + "filter_cols": ["A", "D"], + }, + { + "name": "maud_type_of_consideration", + "filter_cols": [], + }, +] + + +class MAUDLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="MAUDLegalBenchClassification", + description="""This task was constructed from the MAUD dataset, which consists of over 47,000 labels across 152 merger agreements annotated to identify 92 questions in each agreement used by the 2021 American Bar Association (ABA) Public Target Deal Points Study. Each dataset is formatted as a series of multiple-choice questions, where given a segment of the merger agreement and a Deal Point question, the model is to choose the answer that best characterizes the agreement as response. + + This is a combination of all 34 of the MAUD Legal Bench datasets: + 1. MAUD Ability To Consummate Concept Is Subject To MAE Carveouts: Given an excerpt from a merger agreement and the task is to answer: is the “ability to consummate” concept subject to Material Adverse Effect (MAE) carveouts? amongst the multiple choice options. + 2. MAUD Accuracy Of Fundamental Target RWS Bringdown Standard: Given an excerpt from a merger agreement and the task is to answer: how accurate must the fundamental representations and warranties be according to the bring down provision, amongst the multiple choice options. + 3. MAUD Accuracy Of Target Capitalization RW Outstanding Shares Bringdown Standard Answer: Given an excerpt from a merger agreement and the task is to answer: how accurate must the fundamental representations and warranties be according to the bring down provision, amongst the multiple choice options. + 4. MAUD Accuracy Of Target General RW Bringdown Timing Answer: Given an excerpt from a merger agreement and the task is to answer: how accurate must the fundamental representations and warranties be according to the bring down provision, amongst the multiple choice options. + 5. MAUD Additional Matching Rights Period For Modifications Cor: Given an excerpt from a merger agreement and the task is to answer: how long is the additional matching rights period for modifications in case the board changes its recommendation, amongst the multiple choice options. + 6. MAUD Application Of Buyer Consent Requirement Negative Interim Covenant: Given an excerpt from a merger agreement and the task is to answer: what negative covenants does the requirement of Buyer consent apply to, amongst the multiple choice options. + 7. MAUD Buyer Consent Requirement Ordinary Course: Given an excerpt from a merger agreement and the task is to answer: in case the Buyer's consent for the acquired company's ordinary business operations is required, are there any limitations on the Buyer's right to condition, withhold, or delay their consent, amongst the multiple choice options. + 8. 
MAUD Change In Law Subject To Disproportionate Impact Modifier: Given an excerpt from a merger agreement and the task is to answer: do changes in law that have disproportionate impact qualify for Material Adverse Effect (MAE), amongst the multiple choice options. + 9. MAUD Changes In GAAP Or Other Accounting Principles Subject To Disproportionate Impact Modifier: Given an excerpt from a merger agreement and the task is to answer: do changes in GAAP or other accounting principles that have disproportionate impact qualify for Material Adverse Effect (MAE), amongst the multiple choice options. + 10. MAUD COR Permitted In Response To Intervening Event: Given an excerpt from a merger agreement and the task is to answer: is Change of Recommendation permitted in response to an intervening event, amongst the multiple choice options. + 11. MAUD COR Permitted With Board Fiduciary Determination Only: Given an excerpt from a merger agreement and the task is to answer: is Change of Recommendation permitted as long as the board determines that such change is required to fulfill its fiduciary obligations, amongst the multiple choice options. + 12. MAUD COR Standard Intervening Event: Given an excerpt from a merger agreement and the task is to answer: what standard should the board follow when determining whether to change its recommendation in response to an intervening event, amongst the multiple choice options. + 13. MAUD COR Standard Superior Offer: Given an excerpt from a merger agreement and the task is to answer: what standard should the board follow when determining whether to change its recommendation in connection with a superior offer, amongst the multiple choice options. + 14. MAUD Definition Contains Knowledge Requirement Answer: Given an excerpt from a merger agreement and the task is to answer: what is the knowledge requirement in the definition of “Intervening Event”, amongst the multiple choice options. + 15. MAUD Definition Includes Asset Deals: Given an excerpt from a merger agreement and the task is to answer: what qualifies as a superior offer in terms of asset deals, amongst the multiple choice options. + 16. MAUD Definition Includes Stock Deals: Given an excerpt from a merger agreement and the task is to answer: what qualifies as a superior offer in terms of stock deals, amongst the multiple choice options. + 17. MAUD Fiduciary Exception Board Determination Standard: Given an excerpt from a merger agreement and the task is to answer: under what circumstances could the Board take actions on a different acquisition proposal notwithstanding the no-shop provision, amongst the multiple choice options. + 18. MAUD Fiduciary Exception Board Determination Trigger No Shop: Given an excerpt from a merger agreement and the task is to answer: what type of offer could the Board take actions on notwithstanding the no-shop provision, amongst the multiple choice options. + 19. MAUD Financial Point Of View Is The Sole Consideration: Given an excerpt from a merger agreement and the task is to answer: is “financial point of view” the sole consideration when determining whether an offer is superior, amongst the multiple choice options. + 20. MAUD FLS MAE Standard: Given an excerpt from a merger agreement and the task is to answer: what is the Forward Looking Standard (FLS) with respect to Material Adverse Effect (MAE), amongst the multiple choice options. + 21. 
MAUD General Economic and Financial Conditions Subject To Disproportionate Impact Modifier: Given an excerpt from a merger agreement and the task is to answer: do changes caused by general economic and financial conditions that have disproportionate impact qualify for Material Adverse Effect (MAE), amongst the multiple choice options. + 22. MAUD Includes Consistent With Past Practice: Given an excerpt from a merger agreement and the task is to answer: does the wording of the Efforts Covenant clause include “consistent with past practice”, amongst the multiple choice options. + 23. MAUD Initial Matching Rights Period COR: Given an excerpt from a merger agreement and the task is to answer: how long is the initial matching rights period in case the board changes its recommendation, amongst the multiple choice options. + 24. MAUD Initial Matching Rights Period FTR: Given an excerpt from a merger agreement and the task is to answer: how long is the initial matching rights period in connection with the Fiduciary Termination Right (FTR), amongst the multiple choice options. + 25. MAUDInterveningEventRequiredToOccurAfterSigningAnswer: Given an excerpt from a merger agreement and the task is to answer: is an “Intervening Event” required to occur after signing, amongst the multiple choice options. + 26. MAUD Knowledge Definition: Given an excerpt from a merger agreement and the task is to answer: what counts as Knowledge, amongst the multiple choice options. + 27. MAUDLiabilityStandardForNoShopBreachByTargetNonDORepresentatives: Given an excerpt from a merger agreement and the task is to answer: what is the liability standard for no-shop breach by Target Non-D&O Representatives, amongst the multiple choice options. + 28. MAUD Ordinary Course Efforts Standard: Given an excerpt from a merger agreement and the task is to answer: what is the efforts standard, amongst the multiple choice options. + 29. MAUD Pandemic Or Other Public Health Event Subject To Disproportionate Impact Modifier: Given an excerpt from a merger agreement and the task is to answer: do pandemics or other public health events have to have disproportionate impact to qualify for Material Adverse Effect (MAE), amongst the multiple choice options. + 30. MAUD Pandemic Or Other Public Health Event Specific Reference To Pandemic Related Governmental Responses Or Measures: Given an excerpt from a merger agreement and the task is to answer: is there specific reference to pandemic-related governmental responses or measures in the clause that qualifies pandemics or other public health events for Material Adverse Effect (MAE), amongst the multiple choice options. + 31. MAUD Relational Language MAE Applies To: Given an excerpt from a merger agreement and the task is to answer: what carveouts pertaining to Material Adverse Effect (MAE) does the relational language apply to?, amongst the multiple choice options. + 32. MAUD Specific Performance: Given an excerpt from a merger agreement and the task is to answer: what is the wording of the Specific Performance clause regarding the parties' entitlement in the event of a contractual breach, amongst the multiple choice options. + 33. MAUD Tail Period Length: Given an excerpt from a merger agreement and the task is to answer: how long is the Tail Period, amongst the multiple choice options. + 34. MAUD Type Of Consideration: Given an excerpt from a merger agreement and the task is to answer: what type of consideration is specified in this agreement, amongst the multiple choice options. 
+ """, + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2021-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + @article{wang2023maud, + title={MAUD: An Expert-Annotated Legal NLP Dataset for Merger Agreement Understanding}, + author={Wang, Steven H and Scardigli, Antoine and Tang, Leonard and Chen, Wei and Levkin, Dimitry and Chen, Anya and Ball, Spencer and Woodside, Thomas and Zhang, Oliver and Hendrycks, Dan}, + journal={arXiv preprint arXiv:2301.00876}, + year={2023} + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 1802.93}, + ) + + def load_data(self, **kwargs: Any) -> None: + """Load dataset from HuggingFace hub""" + if self.data_loaded: + return + + _hf_dataset = None + class_count = 0 + for dataset_col_map in _MAUD_DATASET_MAP: + _dataset = datasets.load_dataset( + self.metadata_dict["dataset"]["path"], + dataset_col_map["name"], + revision=self.metadata_dict["dataset"]["revision"], + trust_remote_code=True, + ) + + _dataset = _dataset.rename_column("answer", "label") + + # Remove classes with less than 2 examples + _dataset = _dataset.filter( + lambda example: example["label"] not in dataset_col_map["filter_cols"] + ) + + # Get all labels in the dataset + unique_classes = list(set().union(*_dataset.unique("label").values())) + mapping = { + class_val: str(new_label) + for class_val, new_label in zip( + unique_classes, + range(class_count, class_count + len(unique_classes)), + ) + } + _dataset = _dataset.map( + lambda example: { + "label": mapping.get(example["label"].lower(), example["label"]) + } + ) + class_count += len(unique_classes) + 1 + + if _hf_dataset is None: + _hf_dataset = _dataset + else: + _hf_dataset["train"] = datasets.concatenate_datasets( + [_hf_dataset["train"], _dataset["train"]] + ) + _hf_dataset["test"] = datasets.concatenate_datasets( + [_hf_dataset["test"], _dataset["test"]] + ) + + self.dataset = _hf_dataset + self.dataset_transform() + self.data_loaded = True + + def dataset_transform(self): + # The train split has one example in each dataset, so we combine it with the test split and resample + self.dataset = concatenate_datasets( + [self.dataset["train"], 
self.dataset["test"]] + ) + self.dataset = self.dataset.class_encode_column("label") + self.dataset = self.dataset.train_test_split( + train_size=0.2, seed=self.seed, stratify_by_column="label" + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) + + +class NYSJudicialEthicsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="NYSJudicialEthicsLegalBenchClassification", + description="Answer questions on judicial ethics from the New York State Unified Court System Advisory Committee.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "nys_judicial_ethics", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2010-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="mit", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + """, + n_samples={"test": 292}, + avg_character_length={"test": 159.45}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_columns( + {"answer": "label", "question": "text"} + ) + + +class OPP115DataRetentionLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="OPP115DataRetentionLegalBenchClassification", + description="Given a clause from a privacy policy, classify if the clause describes how long user information is stored.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "opp115_data_retention", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2015-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. 
Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + @inproceedings{wilson2016creation, + title={The creation and analysis of a website privacy policy corpus}, + author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others}, + booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, + pages={1330--1340}, + year={2016} + } + """, + n_samples={"test": 88}, + avg_character_length={"test": 195.20}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class OPP115DataSecurityLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="OPP115DataSecurityLegalBenchClassification", + description="Given a clause from a privacy policy, classify if the clause describes how user information is protected.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "opp115_data_security", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2015-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + @inproceedings{wilson2016creation, + title={The creation and analysis of a website privacy policy corpus}, + author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others}, + booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, + pages={1330--1340}, + year={2016} + } + """, + n_samples={"test": 1334}, + avg_character_length={"test": 246.69}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class OPP115DoNotTrackLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="OPP115DoNotTrackLegalBenchClassification", + description="Given a clause from a privacy policy, classify if the clause describes if and how Do Not Track signals for online tracking and advertising are honored.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "opp115_do_not_track", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2015-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + @inproceedings{wilson2016creation, + title={The creation and analysis of a website privacy policy corpus}, + author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others}, + booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, + pages={1330--1340}, + year={2016} + } + """, + n_samples={"test": 110}, + avg_character_length={"test": 223.16}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class OPP115FirstPartyCollectionUseLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="OPP115FirstPartyCollectionUseLegalBenchClassification", + description="Given a clause from a privacy policy, classify if the clause describes how and why a service provider collects user information.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "opp115_first_party_collection_use", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2015-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+            year={2023},
+            eprint={2308.11462},
+            archivePrefix={arXiv},
+            primaryClass={cs.CL}
+        }
+        @inproceedings{wilson2016creation,
+            title={The creation and analysis of a website privacy policy corpus},
+            author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others},
+            booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+            pages={1330--1340},
+            year={2016}
+        }
+        """,
+        n_samples={"test": 2086},
+        avg_character_length={"test": 204.25},
+    )
+
+    def dataset_transform(self):
+        mapping = {"yes": 1, "no": 0}
+        self.dataset = self.dataset.map(
+            lambda example: {
+                "answer": mapping.get(example["answer"].lower(), example["answer"])
+            }
+        )
+        self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class OPP115InternationalAndSpecificAudiencesLegalBenchClassification(
+    AbsTaskClassification
+):
+    metadata = TaskMetadata(
+        name="OPP115InternationalAndSpecificAudiencesLegalBenchClassification",
+        description="Given a clause from a privacy policy, classify if the clause describes practices that pertain only to a specific group of users (e.g., children, Europeans, or California residents).",
+        reference="https://huggingface.co/datasets/nguha/legalbench",
+        dataset={
+            "path": "nguha/legalbench",
+            "name": "opp115_international_and_specific_audiences",
+            "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+        },
+        type="Classification",
+        category="s2s",
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="accuracy",
+        date=("2015-01-01", "2023-08-23"),
+        form=["written"],
+        domains=["Legal"],
+        task_subtypes=[],
+        license="cc-by-nc-4.0",
+        socioeconomic_status="high",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        text_creation="found",
+        bibtex_citation="""
+        @misc{guha2023legalbench,
+            title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+            author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + @inproceedings{wilson2016creation, + title={The creation and analysis of a website privacy policy corpus}, + author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others}, + booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, + pages={1330--1340}, + year={2016} + } + """, + n_samples={"test": 980}, + avg_character_length={"test": 327.71}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class OPP115PolicyChangeLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="OPP115PolicyChangeLegalBenchClassification", + description="Given a clause from a privacy policy, classify if the clause describes if and how users will be informed about changes to the privacy policy.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "opp115_policy_change", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2015-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+            year={2023},
+            eprint={2308.11462},
+            archivePrefix={arXiv},
+            primaryClass={cs.CL}
+        }
+        @inproceedings{wilson2016creation,
+            title={The creation and analysis of a website privacy policy corpus},
+            author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others},
+            booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+            pages={1330--1340},
+            year={2016}
+        }
+        """,
+        n_samples={"test": 431},
+        avg_character_length={"test": 200.99},
+    )
+
+    def dataset_transform(self):
+        mapping = {"yes": 1, "no": 0}
+        self.dataset = self.dataset.map(
+            lambda example: {
+                "answer": mapping.get(example["answer"].lower(), example["answer"])
+            }
+        )
+        self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class OPP115ThirdPartySharingCollectionLegalBenchClassification(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="OPP115ThirdPartySharingCollectionLegalBenchClassification",
+        description="Given a clause from a privacy policy, classify if the clause describes how user information may be shared with or collected by third parties.",
+        reference="https://huggingface.co/datasets/nguha/legalbench",
+        dataset={
+            "path": "nguha/legalbench",
+            "name": "opp115_third_party_sharing_collection",
+            "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+        },
+        type="Classification",
+        category="s2s",
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="accuracy",
+        date=("2015-01-01", "2023-08-23"),
+        form=["written"],
+        domains=["Legal"],
+        task_subtypes=[],
+        license="cc-by-nc-4.0",
+        socioeconomic_status="high",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        text_creation="found",
+        bibtex_citation="""
+        @misc{guha2023legalbench,
+            title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+            author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + @inproceedings{wilson2016creation, + title={The creation and analysis of a website privacy policy corpus}, + author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others}, + booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, + pages={1330--1340}, + year={2016} + } + """, + n_samples={"test": 1590}, + avg_character_length={"test": 223.64}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class OPP115UserAccessEditAndDeletionLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="OPP115UserAccessEditAndDeletionLegalBenchClassification", + description="Given a clause from a privacy policy, classify if the clause describes if and how users may access, edit, or delete their information.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "opp115_user_access,_edit_and_deletion", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2015-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-nc-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. 
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+            year={2023},
+            eprint={2308.11462},
+            archivePrefix={arXiv},
+            primaryClass={cs.CL}
+        }
+        @inproceedings{wilson2016creation,
+            title={The creation and analysis of a website privacy policy corpus},
+            author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others},
+            booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+            pages={1330--1340},
+            year={2016}
+        }
+        """,
+        n_samples={"test": 462},
+        avg_character_length={"test": 218.59},
+    )
+
+    def dataset_transform(self):
+        mapping = {"yes": 1, "no": 0}
+        self.dataset = self.dataset.map(
+            lambda example: {
+                "answer": mapping.get(example["answer"].lower(), example["answer"])
+            }
+        )
+        self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class OPP115UserChoiceControlLegalBenchClassification(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="OPP115UserChoiceControlLegalBenchClassification",
+        description="Given a clause from a privacy policy, classify if the clause describes the choices and control options available to users.",
+        reference="https://huggingface.co/datasets/nguha/legalbench",
+        dataset={
+            "path": "nguha/legalbench",
+            "name": "opp115_user_choice_control",
+            "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+        },
+        type="Classification",
+        category="s2s",
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="accuracy",
+        date=("2015-01-01", "2023-08-23"),
+        form=["written"],
+        domains=["Legal"],
+        task_subtypes=[],
+        license="cc-by-nc-4.0",
+        socioeconomic_status="high",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        text_creation="found",
+        bibtex_citation="""
+        @misc{guha2023legalbench,
+            title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+            author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+            year={2023},
+            eprint={2308.11462},
+            archivePrefix={arXiv},
+            primaryClass={cs.CL}
+        }
+        @inproceedings{wilson2016creation,
+            title={The creation and analysis of a website privacy policy corpus},
+            author={Wilson, Shomir and Schaub, Florian and Dara, Aswarth Abhilash and Liu, Frederick and Cherivirala, Sushain and Leon, Pedro Giovanni and Andersen, Mads Schaarup and Zimmeck, Sebastian and Sathyendra, Kanthashree Mysore and Russell, N Cameron and others},
+            booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+            pages={1330--1340},
+            year={2016}
+        }
+        """,
+        n_samples={"test": 1546},
+        avg_character_length={"test": 210.62},
+    )
+
+    def dataset_transform(self):
+        mapping = {"yes": 1, "no": 0}
+        self.dataset = self.dataset.map(
+            lambda example: {
+                "answer": mapping.get(example["answer"].lower(), example["answer"])
+            }
+        )
+        self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class OralArgumentQuestionPurposeLegalBenchClassification(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="OralArgumentQuestionPurposeLegalBenchClassification",
+        description="""This task classifies questions asked by Supreme Court justices at oral argument into seven categories:
+        1. Background - questions seeking factual or procedural information that is missing or not clear in the briefing
+        2. Clarification - questions seeking to get an advocate to clarify her position or the scope of the rule being advocated for
+        3. Implications - questions about the limits of a rule or its implications for future cases
+        4. Support - questions offering support for the advocate’s position
+        5. Criticism - questions criticizing an advocate’s position
+        6. Communicate - questions designed primarily to communicate with other justices
+        7. Humor - questions designed to interject humor into the argument and relieve tension
+        """,
+        reference="https://huggingface.co/datasets/nguha/legalbench",
+        dataset={
+            "path": "nguha/legalbench",
+            "name": "oral_argument_question_purpose",
+            "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+        },
+        type="Classification",
+        category="s2s",
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="accuracy",
+        date=("2021-01-01", "2023-08-23"),  # best guess
+        form=["written"],
+        domains=["Legal"],
+        task_subtypes=[],
+        license="cc-by-4.0",
+        socioeconomic_status="high",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        text_creation="found",
+        bibtex_citation="""
+        @misc{guha2023legalbench,
+            title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+            author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 312}, + avg_character_length={"test": 269.71}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"answer": "label", "question": "text"} + ) + + +class OverrulingLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="OverrulingLegalBenchClassification", + description="""This task consists of classifying whether or not a particular sentence of case law overturns the decision of a previous case.""", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "overruling", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("1965-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @inproceedings{zheng2021does, + title={When does pretraining help? 
assessing self-supervised learning for law and the casehold dataset of 53,000+ legal holdings},
+            author={Zheng, Lucia and Guha, Neel and Anderson, Brandon R and Henderson, Peter and Ho, Daniel E},
+            booktitle={Proceedings of the eighteenth international conference on artificial intelligence and law},
+            pages={159--168},
+            year={2021}
+        }
+        """,
+        n_samples={"test": 2048},
+        avg_character_length={"test": 167.20},
+    )
+
+    def dataset_transform(self):
+        mapping = {"yes": 1, "no": 0}
+        self.dataset = self.dataset.map(
+            lambda example: {
+                "answer": mapping.get(example["answer"].lower(), example["answer"])
+            }
+        )
+        self.dataset = self.dataset.rename_column("answer", "label")
+        self.dataset = self.stratified_subsampling(
+            self.dataset, seed=self.seed, splits=["test"]
+        )
+
+
+class PersonalJurisdictionLegalBenchClassification(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="PersonalJurisdictionLegalBenchClassification",
+        description="""Given a fact pattern describing the set of contacts between a plaintiff, defendant, and forum, determine if a court in that forum could exercise personal jurisdiction over the defendant.""",
+        reference="https://huggingface.co/datasets/nguha/legalbench",
+        dataset={
+            "path": "nguha/legalbench",
+            "name": "personal_jurisdiction",
+            "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+        },
+        type="Classification",
+        category="s2s",
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="accuracy",
+        date=("2000-01-01", "2023-08-23"),  # best guess
+        form=["written"],
+        domains=["Legal"],
+        task_subtypes=[],
+        license="cc-by-4.0",
+        socioeconomic_status="high",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        text_creation="found",
+        bibtex_citation="""
+        @misc{guha2023legalbench,
+            title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+            author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+            year={2023},
+            eprint={2308.11462},
+            archivePrefix={arXiv},
+            primaryClass={cs.CL}
+        },
+        """,
+        n_samples={"test": 50},
+        avg_character_length={"test": 381.14},
+    )
+
+    def dataset_transform(self):
+        mapping = {"yes": 1, "no": 0}
+        self.dataset = self.dataset.map(
+            lambda example: {
+                "answer": mapping.get(example["answer"].lower(), example["answer"])
+            }
+        )
+        self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class PROALegalBenchClassification(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="PROALegalBenchClassification",
+        description="""Given a statute, determine if the text contains an explicit private right of action. A private right of action (PROA) exists when a statute empowers an ordinary individual (i.e., a private person) to legally enforce their rights by bringing an action in court.
In short, a PROA creates the ability for an individual to sue someone in order to recover damages or halt some offending conduct. PROAs are ubiquitous in antitrust law (in which individuals harmed by anti-competitive behavior can sue offending firms for compensation) and environmental law (in which individuals can sue entities which release hazardous substances for damages).""", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "proa", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + """, + n_samples={"test": 95}, + avg_character_length={"test": 251.73}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class SCDBPAccountabilityLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SCDBPAccountabilityLegalBenchClassification", + description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose whether the retail seller or manufacturer maintains internal compliance procedures on company standards regarding human trafficking and slavery? This includes any type of internal accountability mechanism. 
Requiring suppliers to independently comply with laws does not qualify, and asking for documentary evidence of compliance does not count either.'",
+        reference="https://huggingface.co/datasets/nguha/legalbench",
+        dataset={
+            "path": "nguha/legalbench",
+            "name": "supply_chain_disclosure_best_practice_accountability",
+            "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+        },
+        type="Classification",
+        category="s2s",
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="accuracy",
+        date=("2010-01-01", "2015-06-30"),
+        form=["written"],
+        domains=["Legal"],
+        task_subtypes=[],
+        license="cc-by-4.0",
+        socioeconomic_status="high",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        text_creation="found",
+        bibtex_citation="""
+        @misc{guha2023legalbench,
+            title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+            author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+            year={2023},
+            eprint={2308.11462},
+            archivePrefix={arXiv},
+            primaryClass={cs.CL}
+        },
+        @article{chilton2017limitations,
+            title={The limitations of supply chain disclosure regimes},
+            author={Chilton, Adam S and Sarfaty, Galit A},
+            journal={Stan. J. Int'l L.},
+            volume={53},
+            pages={1},
+            year={2017},
+            publisher={HeinOnline}
+        }
+        """,
+        n_samples={"test": 379},
+        avg_character_length={"test": 3520},
+    )
+
+    def dataset_transform(self):
+        mapping = {"yes": 1, "no": 0}
+        self.dataset = self.dataset.map(
+            lambda example: {
+                "answer": mapping.get(example["answer"].lower(), example["answer"])
+            }
+        )
+        self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class SCDBPAuditsLegalBenchClassification(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="SCDBPAuditsLegalBenchClassification",
+        description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose whether the retail seller or manufacturer performs any type of audit, or reserves the right to audit?'",
+        reference="https://huggingface.co/datasets/nguha/legalbench",
+        dataset={
+            "path": "nguha/legalbench",
+            "name": "supply_chain_disclosure_best_practice_audits",
+            "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+        },
+        type="Classification",
+        category="s2s",
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="accuracy",
+        date=("2010-01-01", "2015-06-30"),
+        form=["written"],
+        domains=["Legal"],
+        task_subtypes=[],
+        license="cc-by-4.0",
+        socioeconomic_status="high",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        text_creation="found",
+        bibtex_citation="""
+        @misc{guha2023legalbench,
+            title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+            author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+            year={2023},
+            eprint={2308.11462},
+            archivePrefix={arXiv},
+            primaryClass={cs.CL}
+        },
+        @article{chilton2017limitations,
+            title={The limitations of supply chain disclosure regimes},
+            author={Chilton, Adam S and Sarfaty, Galit A},
+            journal={Stan. J. Int'l L.},
+            volume={53},
+            pages={1},
+            year={2017},
+            publisher={HeinOnline}
+        }
+        """,
+        n_samples={"test": 379},
+        avg_character_length={"test": 3507},
+    )
+
+    def dataset_transform(self):
+        mapping = {"yes": 1, "no": 0}
+        self.dataset = self.dataset.map(
+            lambda example: {
+                "answer": mapping.get(example["answer"].lower(), example["answer"])
+            }
+        )
+        self.dataset = self.dataset.rename_column("answer", "label")
+
+
+class SCDBPCertificationLegalBenchClassification(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="SCDBPCertificationLegalBenchClassification",
+        description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose whether the retail seller or manufacturer requires direct suppliers to certify that they comply with laws regarding slavery and human trafficking?'",
+        reference="https://huggingface.co/datasets/nguha/legalbench",
+        dataset={
+            "path": "nguha/legalbench",
+            "name": "supply_chain_disclosure_best_practice_certification",
+            "revision": "12ca3b695563788fead87a982ad1a068284413f4",
+        },
+        type="Classification",
+        category="s2s",
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="accuracy",
+        date=("2010-01-01", "2015-06-30"),
+        form=["written"],
+        domains=["Legal"],
+        task_subtypes=[],
+        license="cc-by-4.0",
+        socioeconomic_status="high",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        text_creation="found",
+        bibtex_citation="""
+        @misc{guha2023legalbench,
+            title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models},
+            author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li},
+            year={2023},
+            eprint={2308.11462},
+            archivePrefix={arXiv},
+            primaryClass={cs.CL}
+        },
+        @article{chilton2017limitations,
+            title={The limitations of supply chain disclosure regimes},
+            author={Chilton, Adam S and Sarfaty, Galit A},
+            journal={Stan. J.
Int'l L.}, + volume={53}, + pages={1}, + year={2017}, + publisher={HeinOnline} + } + """, + n_samples={"test": 378}, + avg_character_length={"test": 3507}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class SCDBPTrainingLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SCDBPTrainingLegalBenchClassification", + description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose whether the retail seller or manufacturer provides training to employees on human trafficking and slavery? Broad policies such as ongoing dialogue on mitigating risks of human trafficking and slavery or increasing managers and purchasers knowledge about health, safety and labor practices qualify as training. Providing training to contractors who failed to comply with human trafficking laws counts as training.'", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "supply_chain_disclosure_best_practice_training", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2010-01-01", "2015-06-30"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{chilton2017limitations, + title={The limitations of supply chain disclosure regimes}, + author={Chilton, Adam S and Sarfaty, Galit A}, + journal={Stan. J. 
Int'l L.}, + volume={53}, + pages={1}, + year={2017}, + publisher={HeinOnline} + } + """, + n_samples={"test": 379}, + avg_character_length={"test": 3506}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class SCDBPVerificationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SCDBPVerificationLegalBenchClassification", + description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose whether the retail seller or manufacturer engages in verification and auditing as one practice, expresses that it may conduct an audit, or expresses that it is assessing supplier risks through a review of the US Dept. of Labor's List?'", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "supply_chain_disclosure_best_practice_verification", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2010-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{chilton2017limitations, + title={The limitations of supply chain disclosure regimes}, + author={Chilton, Adam S and Sarfaty, Galit A}, + journal={Stan. J.
Int'l L.}, + volume={53}, + pages={1}, + year={2017}, + publisher={HeinOnline} + } + """, + n_samples={"test": 379}, + avg_character_length={"test": 3498}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class SCDDAccountabilityLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SCDDAccountabilityLegalBenchClassification", + description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose to what extent, if any, that the retail seller or manufacturer maintains internal accountability standards and procedures for employees or contractors failing to meet company standards regarding slavery and trafficking?'", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "supply_chain_disclosure_disclosed_accountability", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2010-01-01", "2015-06-30"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{chilton2017limitations, + title={The limitations of supply chain disclosure regimes}, + author={Chilton, Adam S and Sarfaty, Galit A}, + journal={Stan. J. 
Int'l L.}, + volume={53}, + pages={1}, + year={2017}, + publisher={HeinOnline} + } + """, + n_samples={"test": 378}, + avg_character_length={"test": 3522}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class SCDDAuditsLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SCDDAuditsLegalBenchClassification", + description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose to what extent, if any, that the retail seller or manufacturer conducts audits of suppliers to evaluate supplier compliance with company standards for trafficking and slavery in supply chains? The disclosure shall specify if the verification was not an independent, unannounced audit.'", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "supply_chain_disclosure_disclosed_audits", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2010-01-01", "2015-06-30"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{chilton2017limitations, + title={The limitations of supply chain disclosure regimes}, + author={Chilton, Adam S and Sarfaty, Galit A}, + journal={Stan. J. 
Int'l L.}, + volume={53}, + pages={1}, + year={2017}, + publisher={HeinOnline} + } + """, + n_samples={"test": 379}, + avg_character_length={"test": 3506}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class SCDDCertificationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SCDDCertificationLegalBenchClassification", + description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose to what extent, if any, that the retail seller or manufacturer requires direct suppliers to certify that materials incorporated into the product comply with the laws regarding slavery and human trafficking of the country or countries in which they are doing business?'", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "supply_chain_disclosure_disclosed_certification", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2010-01-01", "2015-06-30"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{chilton2017limitations, + title={The limitations of supply chain disclosure regimes}, + author={Chilton, Adam S and Sarfaty, Galit A}, + journal={Stan. J. 
Int'l L.}, + volume={53}, + pages={1}, + year={2017}, + publisher={HeinOnline} + } + """, + n_samples={"test": 378}, + avg_character_length={"test": 3518}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class SCDDTrainingLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SCDDTrainingLegalBenchClassification", + description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose to what extent, if any, that the retail seller or manufacturer provides company employees and management, who have direct responsibility for supply chain management, training on human trafficking and slavery, particularly with respect to mitigating risks within the supply chains of products?'", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "supply_chain_disclosure_disclosed_training", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2010-01-01", "2015-06-30"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{chilton2017limitations, + title={The limitations of supply chain disclosure regimes}, + author={Chilton, Adam S and Sarfaty, Galit A}, + journal={Stan. J. 
Int'l L.}, + volume={53}, + pages={1}, + year={2017}, + publisher={HeinOnline} + } + """, + n_samples={"test": 379}, + avg_character_length={"test": 3499}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class SCDDVerificationLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SCDDVerificationLegalBenchClassification", + description="This is a binary classification task in which the LLM must determine if a supply chain disclosure meets the following coding criteria: 'Does the above statement disclose to what extent, if any, that the retail seller or manufacturer engages in verification of product supply chains to evaluate and address risks of human trafficking and slavery? If the company conducts verification, the disclosure shall specify if the verification was not conducted by a third party.'", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "supply_chain_disclosure_disclosed_verification", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2010-01-01", "2015-06-30"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }, + @article{chilton2017limitations, + title={The limitations of supply chain disclosure regimes}, + author={Chilton, Adam S and Sarfaty, Galit A}, + journal={Stan. J. Int'l L.}, + volume={53}, + pages={1}, + year={2017}, + publisher={HeinOnline} + } + """, + n_samples={"test": 379}, + avg_character_length={"test": 3503}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class TelemarketingSalesRuleLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TelemarketingSalesRuleLegalBenchClassification", + description="Determine how 16 C.F.R. § 310.3(a)(1) and 16 C.F.R. § 310.3(a)(2) (governing deceptive practices) apply to different fact patterns.
This dataset is designed to test a model’s ability to apply 16 C.F.R. § 310.3(a)(1) and 16 C.F.R. § 310.3(a)(2) of the Telemarketing Sales Rule to a simple fact pattern with a clear outcome. Each fact pattern ends with the question: “Is this a violation of the Telemarketing Sales Rule?” Each fact pattern is paired with the answer “Yes” or the answer “No.” Fact patterns are listed in the column “text,” and answers are listed in the column “label.”", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "telemarketing_sales_rule", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2020-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + """, + n_samples={"test": 47}, + avg_character_length={"test": 348.29}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class TextualismToolDictionariesLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TextualismToolDictionariesLegalBenchClassification", + description="Determine if a paragraph from a judicial opinion is applying a form of textualism that relies on the dictionary meaning of terms.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "textualism_tool_dictionaries", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2020-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E.
Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + """, + n_samples={"test": 107}, + avg_character_length={"test": 943.23}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class TextualismToolPlainLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TextualismToolPlainLegalBenchClassification", + description="Determine if a paragraph from a judicial opinion is applying a form of textualism that relies on the ordinary (“plain”) meaning of terms.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "textualism_tool_plain", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2020-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H.
Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + """, + n_samples={"test": 165}, + avg_character_length={"test": 997.97}, + ) + + def dataset_transform(self): + mapping = {"yes": 1, "no": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_column("answer", "label") + + +class UCCVCommonLawLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="UCCVCommonLawLegalBenchClassification", + description="Determine if a contract is governed by the Uniform Commercial Code (UCC) or the common law of contracts.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "ucc_v_common_law", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2020-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + """, + n_samples={"test": 94}, + avg_character_length={"test": 114.127}, + ) + + def dataset_transform(self): + mapping = {"ucc": 1, "common law": 0} + self.dataset = self.dataset.map( + lambda example: { + "answer": mapping.get(example["answer"].lower(), example["answer"]) + } + ) + self.dataset = self.dataset.rename_columns( + {"answer": "label", "contract": "text"} + ) + + +class UnfairTOSLegalBenchClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="UnfairTOSLegalBenchClassification", + description="Given a clause from a terms-of-service contract, determine the category the clause belongs to. The purpose of this task is classifying clauses in Terms of Service agreements. Clauses have been annotated into nine categories: ['Arbitration', 'Unilateral change', 'Content removal', 'Jurisdiction', 'Choice of law', 'Limitation of liability', 'Unilateral termination', 'Contract by using', 'Other']. The first eight categories correspond to clauses that would be deemed potentially unfair.
The last category (Other) corresponds to clauses in agreements which don’t fit into these categories.", + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "name": "unfair_tos", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2006-01-01", "2023-08-23"), + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + @article{lippi2019claudette, + title={CLAUDETTE: an automated detector of potentially unfair clauses in online terms of service}, + author={Lippi, Marco and Pa{\l}ka, Przemys{\l}aw and Contissa, Giuseppe and Lagioia, Francesca and Micklitz, Hans-Wolfgang and Sartor, Giovanni and Torroni, Paolo}, + journal={Artificial Intelligence and Law}, + volume={27}, + pages={117--139}, + year={2019}, + publisher={Springer} + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 184.69}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("answer", "label") + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/NewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/NewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..d0af87813358e44a00f49b667de50e18fe9e915b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/NewsClassification.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class NewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="NewsClassification", + description="Large News Classification Dataset", + dataset={ + "path": "fancyzhx/ag_news", + "revision": "eb185aade064a813bc0b7f42de02595523103ca4", + }, + reference="https://arxiv.org/abs/1509.01626", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=( + "2004-01-01", + "2015-12-31", + ), # Estimated range for the collection of news articles + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="Apache 2.0", + socioeconomic_status="medium", + 
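+ # AG News is a four-class topic classification dataset (labels: World, Sports, Business, Sci/Tech); accuracy is computed over these four labels.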
annotations_creators="expert-annotated", + dialect=["eng-Latn-US", "eng-Latn-GB", "eng-Latn-AU"], + text_creation="found", + bibtex_citation="", + n_samples={"test": 7600}, + avg_character_length={"test": 235.29}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/PatentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/PatentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..ab62332f9a9e160044cfb63ed91e281fefd9558d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/PatentClassification.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class PatentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="PatentClassification", + description="Classification Dataset of Patents and Abstracts", + dataset={ + "path": "ccdv/patent-classification", + "revision": "2f38a1dfdecfacee0184d74eaeafd3c0fb49d2a6", + }, + reference="https://aclanthology.org/P19-1212.pdf", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2021-11-05", "2022-10-22"), + form=["written"], + domains=["Legal"], + task_subtypes=["Topic classification"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{sharma-etal-2019-bigpatent, + title = "{BIGPATENT}: A Large-Scale Dataset for Abstractive and Coherent Summarization", + author = "Sharma, Eva and + Li, Chen and + Wang, Lu", + editor = "Korhonen, Anna and + Traum, David and + M{\`a}rquez, Llu{\'\i}s", + booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", + month = jul, + year = "2019", + address = "Florence, Italy", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/P19-1212", + doi = "10.18653/v1/P19-1212", + pages = "2204--2213", + abstract = "Most existing text summarization datasets are compiled from the news domain, where summaries have a flattened discourse structure. In such datasets, summary-worthy content often appears in the beginning of input articles. Moreover, large segments from input articles are present verbatim in their respective summaries. These issues impede the learning and evaluation of systems that can understand an article{'}s global content structure as well as produce abstractive summaries with high compression ratio. In this work, we present a novel dataset, BIGPATENT, consisting of 1.3 million records of U.S. patent documents along with human written abstractive summaries. Compared to existing summarization datasets, BIGPATENT has the following properties: i) summaries contain a richer discourse structure with more recurring entities, ii) salient content is evenly distributed in the input, and iii) lesser and shorter extractive fragments are present in the summaries.
Finally, we train and evaluate baselines and popular learning models on BIGPATENT to shed light on new challenges and motivate future directions for summarization research.", + }""", + n_samples={"test": 5000}, + avg_character_length={"test": 18620.44}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicChatClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicChatClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..8ff0a3be629b15ca7d0ef77571d690fb71c75939 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicChatClassification.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +_EVAL_SPLITS = ["test"] + + +class ToxicChatClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ToxicChatClassification", + description="""This dataset contains toxicity annotations on 10K user + prompts collected from the Vicuna online demo. We utilize a human-AI + collaborative annotation framework to guarantee the quality of annotation + while maintaining a feasible annotation workload. The details of data + collection, pre-processing, and annotation can be found in our paper. + We believe that ToxicChat can be a valuable resource to drive further + advancements toward building a safe and healthy environment for user-AI + interactions. + Only human annotated samples are selected here.""", + reference="https://aclanthology.org/2023.findings-emnlp.311/", + dataset={ + "path": "lmsys/toxic-chat", + "name": "toxicchat0124", + "revision": "3e0319203c7162b9c9f8015b594441f979c199bc", + }, + type="Classification", + category="s2s", + eval_splits=_EVAL_SPLITS, + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2023-10-26", "2024-01-31"), + form=["written"], + domains=["Constructed"], + task_subtypes=["Sentiment/Hate speech"], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{lin2023toxicchat, + title={ToxicChat: Unveiling Hidden Challenges of Toxicity Detection in Real-World User-AI Conversation}, + author={Zi Lin and Zihan Wang and Yongqi Tong and Yangkun Wang and Yuxin Guo and Yujia Wang and Jingbo Shang}, + year={2023}, + eprint={2310.17389}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }""", + n_samples={"test": 1427}, + avg_character_length={"test": 189.4}, + ) + + def dataset_transform(self): + keep_cols = ["user_input", "toxicity"] + rename_dict = dict(zip(keep_cols, ["text", "label"])) + remove_cols = [ + col + for col in self.dataset[_EVAL_SPLITS[0]].column_names + if col not in keep_cols + ] + self.dataset = self.dataset.rename_columns(rename_dict) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) + # only use human-annotated data + self.dataset = self.dataset.filter(lambda x: x["human_annotation"]) + self.dataset = self.dataset.remove_columns(remove_cols) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicConversationsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicConversationsClassification.py new file mode 100644 index 
0000000000000000000000000000000000000000..7202ad8ed6b4ff70ba1116e5164328853fde6981 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicConversationsClassification.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class ToxicConversationsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="ToxicConversationsClassification", + description="Collection of comments from the Civil Comments platform together with annotations if the comment is toxic or not.", + reference="https://www.kaggle.com/competitions/jigsaw-unintended-bias-in-toxicity-classification/overview", + dataset={ + "path": "mteb/toxic_conversations_50k", + "revision": "edfaf9da55d3dd50d43143d90c1ac476895ae6de", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=( + "2017-01-01", + "2018-12-31", + ), # Estimated range for the collection of comments + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="CC BY 4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{jigsaw-unintended-bias-in-toxicity-classification, + author = {cjadams, Daniel Borkan, inversion, Jeffrey Sorensen, Lucas Dixon, Lucy Vasserman, nithum}, + title = {Jigsaw Unintended Bias in Toxicity Classification}, + publisher = {Kaggle}, + year = {2019}, + url = {https://kaggle.com/competitions/jigsaw-unintended-bias-in-toxicity-classification} +}""", + n_samples={"test": 50000}, + avg_character_length={"test": 296.6}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetSentimentExtractionClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetSentimentExtractionClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..5f5ad6facf0ad169a53950394460ebe2db1cc45f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetSentimentExtractionClassification.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class TweetSentimentExtractionClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TweetSentimentExtractionClassification", + description="", + reference="https://www.kaggle.com/competitions/tweet-sentiment-extraction/overview", + dataset={ + "path": "mteb/tweet_sentiment_extraction", + "revision": "d604517c81ca91fe16a244d1248fc021f9ecee7a", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=( + "2020-01-01", + "2020-12-31", + ), # Estimated range for the collection of tweets + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + 
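+ # Note: the Kaggle source labels each tweet as negative, neutral, or positive, so this is a three-class sentiment task (label set assumed from the competition page, not verified here); the metadata_dict override below raises n_experiments and samples_per_label accordingly.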
bibtex_citation="""@misc{tweet-sentiment-extraction, + author = {Maggie, Phil Culliton, Wei Chen}, + title = {Tweet Sentiment Extraction}, + publisher = {Kaggle}, + year = {2020}, + url = {https://kaggle.com/competitions/tweet-sentiment-extraction} +}""", + n_samples={"test": 3534}, + avg_character_length={"test": 67.8}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = dict(self.metadata) + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 32 + return metadata_dict diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetTopicSingleClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetTopicSingleClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..5ebf96e67f37d7a2501d9a2a2a2d61410eb97368 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetTopicSingleClassification.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TweetTopicSingleClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TweetTopicSingleClassification", + description="""Topic classification dataset on Twitter with 6 labels. Each instance of + TweetTopic comes with a timestamp which distributes from September 2019 to August 2021. + Tweets were preprocessed before the annotation to normalize some artifacts, converting + URLs into a special token {{URL}} and non-verified usernames into {{USERNAME}}. For verified + usernames, we replace its display name (or account name) with symbols {@}. + """, + dataset={ + "path": "cardiffnlp/tweet_topic_single", + "revision": "87b7a0d1c402dbb481db649569c556d9aa27ac05", + }, + reference="https://arxiv.org/abs/2209.09824", + type="Classification", + category="s2s", + eval_splits=["test_2021"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2019-09-01", "2021-08-31"), + form=["written"], + domains=["Social", "News"], + task_subtypes=["Topic classification"], + license="Other", + socioeconomic_status="medium", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{dimosthenis-etal-2022-twitter, + title = "{T}witter {T}opic {C}lassification", + author = "Antypas, Dimosthenis and + Ushio, Asahi and + Camacho-Collados, Jose and + Neves, Leonardo and + Silva, Vitor and + Barbieri, Francesco", + booktitle = "Proceedings of the 29th International Conference on Computational Linguistics", + month = oct, + year = "2022", + address = "Gyeongju, Republic of Korea", + publisher = "International Committee on Computational Linguistics" + } + """, + n_samples={"test_2021": 1693}, + avg_character_length={"test_2021": 167.66}, + ) + + def dataset_transform(self): + self.dataset["train"] = self.dataset["train_2021"] diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fas/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoHateSpeechClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoHateSpeechClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..e4724ed81373a7ba80ca373e97808cab7f036e20 
--- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoHateSpeechClassification.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +TEST_SAMPLES = 2048 + + +class FilipinoHateSpeechClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="FilipinoHateSpeechClassification", + description="Filipino Twitter dataset for hate speech classification.", + reference="https://pcj.csp.org.ph/index.php/pcj/issue/download/29/PCJ%20V14%20N1%20pp1-14%202019", + dataset={ + "path": "hate_speech_filipino", + "revision": "1994e9bb7f3ec07518e3f0d9e870cb293e234686", + }, + type="Classification", + category="s2s", + date=("2019-08-01", "2019-08-01"), + eval_splits=["validation", "test"], + eval_langs=["fil-Latn"], + main_score="accuracy", + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @article{Cabasag-2019-hate-speech, + title={Hate speech in Philippine election-related tweets: Automatic detection and classification using natural language processing.}, + author={Neil Vicente Cabasag, Vicente Raphael Chan, Sean Christian Lim, Mark Edward Gonzales, and Charibeth Cheng}, + journal={Philippine Computing Journal}, + volume={XIV}, + number={1}, + month={August}, + year={2019} + } + """, + n_samples={"validation": TEST_SAMPLES, "test": TEST_SAMPLES}, + avg_character_length={"validation": 88.1, "test": 87.4}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["validation", "test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoShopeeReviewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoShopeeReviewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..fa356de4c7d98afbf187ee02f880cf675f62025a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoShopeeReviewsClassification.py @@ -0,0 +1,44 @@ +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class FilipinoShopeeReviewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="FilipinoShopeeReviewsClassification", + description="The Shopee reviews tl 15 dataset is constructed by randomly taking, for each review star from 1 to 5, 2100 training samples and 450 samples each for validation and testing.
In total, there are 10500 training samples and 2250 samples each for validation and testing.", + reference="https://uijrt.com/articles/v4/i8/UIJRTV4I80009.pdf", + dataset={ + "path": "scaredmeow/shopee-reviews-tl-stars", + "revision": "d096f402fdc76886458c0cfb5dedc829bea2b935", + }, + type="Classification", + task_subtypes=["Sentiment/Hate speech"], + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["fil-Latn"], + form=["written"], + domains=["Social"], + license="MPL-2.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + date=("2022-05-13", "2023-05-13"), + main_score="accuracy", + bibtex_citation=""" + @article{riegoenhancement, + title={Enhancement to Low-Resource Text Classification via Sequential Transfer Learning}, + author={Riego, Neil Christian R. and Villarba, Danny Bell and Sison, Ariel Antwaun Rolando C. and Pineda, Fernandez C. and Lagunzad, Herminiño C.}, + journal={United International Journal for Research & Technology}, + volume={04}, + issue={08}, + pages={72--82} + }""", + n_samples={"validation": 2250, "test": 2250}, + avg_character_length={"validation": 143.8, "test": 145.1}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["validation", "test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/por/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/por/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ron/RomanianReviewsSentiment.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ron/RomanianReviewsSentiment.py new file mode 100644 index 0000000000000000000000000000000000000000..8ce9140333ab00f78defbc45f2c30e64ba6cd908 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ron/RomanianReviewsSentiment.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 2048 + + +class RomanianReviewsSentiment(AbsTaskClassification): + metadata = TaskMetadata( + name="RomanianReviewsSentiment", + description="LaRoSeDa (A Large Romanian Sentiment Data Set) contains 15,000 reviews written in Romanian", + reference="https://arxiv.org/abs/2101.04197", + dataset={ + "path": "universityofbucharest/laroseda", + "revision": "358bcc95aeddd5d07a4524ee416f03d993099b23", + }, + type="Classification", + category="s2s", + date=("2020-01-01", "2021-01-11"), + eval_splits=["test"], + eval_langs=["ron-Latn"], + main_score="accuracy", + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="CC-BY-4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" +@article{ + tache2101clustering, + title={Clustering Word Embeddings with Self-Organizing Maps.
Application on LaRoSeDa -- A Large Romanian Sentiment Data Set}, + author={Anca Maria Tache and Mihaela Gaman and Radu Tudor Ionescu}, + journal={ArXiv}, + year = {2021} +} +""", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 588.6}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"content": "text", "starRating": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/san/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/san/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/sin/SinhalaNewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/sin/SinhalaNewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..da3b06c807a6fc640207162e5e0234a49f9b4268 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/sin/SinhalaNewsClassification.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class SinhalaNewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SinhalaNewsClassification", + description="This file contains news texts (sentences) belonging to 5 different news categories (political, business, technology, sports and entertainment). The original dataset was released by Nisansa de Silva (Sinhala Text Classification: Observations from the Perspective of a Resource Poor Language, 2015).", + dataset={ + "path": "NLPC-UOM/Sinhala-News-Category-classification", + "revision": "7fb2f514ea683c5282dfec0a9672ece8de90ac50", + }, + reference="https://huggingface.co/datasets/NLPC-UOM/Sinhala-News-Category-classification", + type="Classification", + category="s2s", + eval_splits=["train"], + eval_langs=["sin-Sinh"], + main_score="accuracy", + date=("2019-03-17", "2020-08-06"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="mit", + socioeconomic_status="low", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{deSilva2015, + author = {Nisansa de Silva}, + title = {Sinhala Text Classification: Observations from the Perspective of a Resource Poor Language}, + year = {2015}, + } + @article{dhananjaya2022, + author = {Dhananjaya et al.}, + title = {BERTifying Sinhala - A Comprehensive Analysis of Pre-trained Language Models for Sinhala Text Classification}, + year = {2022}, + }""", + n_samples={"train": 3327}, + avg_character_length={"train": 148.04}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"comments": "text", "labels": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["train"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/slk/CSFDSKMovieReviewSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/slk/CSFDSKMovieReviewSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..ccd44a72650a651b2f094cceac6141e429ec6a0b --- /dev/null +++
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/slk/CSFDSKMovieReviewSentimentClassification.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 2048 + + +class CSFDSKMovieReviewSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="CSFDSKMovieReviewSentimentClassification", + description="The dataset contains 30k user reviews from csfd.cz in Slovak.", + reference="https://arxiv.org/abs/2304.01922", + dataset={ + "path": "fewshot-goes-multilingual/sk_csfd-movie-reviews", + "revision": "23a20c659d868740ef9c54854de631fe19cd5c17", + }, + type="Classification", + category="s2s", + date=("2002-05-21", "2020-03-05"), + eval_splits=["test"], + eval_langs=["slk-Latn"], + main_score="accuracy", + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="CC-BY-SA-4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" +@misc{štefánik2023resources, + title={Resources and Few-shot Learners for In-context Learning in Slavic Languages}, + author={Michal Štefánik and Marek Kadlčík and Piotr Gramacki and Petr Sojka}, + year={2023}, + eprint={2304.01922}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +""", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 366.2}, + ) + + @property + def metadata_dict(self): + md = super().metadata_dict + # Increase the samples_per_label in order to improve baseline performance + md["samples_per_label"] = 20 + return md + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"comment": "text", "rating_int": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"], n_samples=N_SAMPLES + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/slv/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/slv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/spa/SpanishSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/spa/SpanishSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..b44e7b64c70617eef0ca23c6cce33e88df1954fe --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/spa/SpanishSentimentClassification.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class SpanishSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SpanishSentimentClassification", + description="A Spanish dataset for sentiment classification.", + reference="https://huggingface.co/datasets/sepidmnorozy/Spanish_sentiment", + dataset={ + "path": "sepidmnorozy/Spanish_sentiment", + "revision": "2a6e340e4b59b7c0a78c03a0b79ac27e1b4a2662", + }, + type="Classification", + category="s2s", + date=("2022-08-16", "2022-08-16"), + eval_splits=["validation", "test"], + eval_langs=["spa-Latn"], + main_score="accuracy", + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + 
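+ # Sentiment labels are assumed to be binary (negative/positive), following the source dataset card; only the small validation (147) and test (296) splits are evaluated.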
text_creation="found", + bibtex_citation=""" + @inproceedings{mollanorozy-etal-2023-cross, + title = "Cross-lingual Transfer Learning with \{P\}ersian", + author = "Mollanorozy, Sepideh and + Tanti, Marc and + Nissim, Malvina", + editor = "Beinborn, Lisa and + Goswami, Koustava and + Murado{\\u{g}}lu, Saliha and + Sorokin, Alexey and + Kumar, Ritesh and + Shcherbakov, Andreas and + Ponti, Edoardo M. and + Cotterell, Ryan and + Vylomova, Ekaterina", + booktitle = "Proceedings of the 5th Workshop on Research in Computational Linguistic Typology and Multilingual NLP", + month = may, + year = "2023", + address = "Dubrovnik, Croatia", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2023.sigtyp-1.9", + doi = "10.18653/v1/2023.sigtyp-1.9", + pages = "89--95", + } + """, + n_samples={"validation": 147, "test": 296}, + avg_character_length={"validation": 85.02, "test": 87.91}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ssw/SiswatiNewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ssw/SiswatiNewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..f57f19060cd83db1a0367fde5c088876172511c6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ssw/SiswatiNewsClassification.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 2800 + + +class SiswatiNewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SiswatiNewsClassification", + description="Siswati News Classification Dataset", + reference="https://huggingface.co/datasets/dsfsi/za-isizulu-siswati-news", + dataset={ + "path": "isaacchung/siswati-news", + "revision": "f5502326c4e48adc99b18b1582f68b8fb5e7ec30", + }, + type="Classification", + category="s2s", + eval_splits=["train"], + eval_langs=["ssw-Latn"], + main_score="accuracy", + date=("2022-08-01", "2022-08-01"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="CC-BY-SA-4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@article{Madodonga_Marivate_Adendorff_2023, title={Izindaba-Tindzaba: Machine learning news categorisation for Long and Short Text for isiZulu and Siswati}, volume={4}, url={https://upjournals.up.ac.za/index.php/dhasa/article/view/4449}, DOI={10.55492/dhasa.v4i01.4449}, author={Madodonga, Andani and Marivate, Vukosi and Adendorff, Matthew}, year={2023}, month={Jan.} } + """, + n_samples={"train": 80}, + avg_character_length={"train": 354.2}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns({"title": "text"}) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ssw/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ssw/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/svk/SlovakMovieReviewSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/svk/SlovakMovieReviewSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..0d4ee003af762d35806abffa6a838f2d7dd20a5b --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/svk/SlovakMovieReviewSentimentClassification.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class SlovakMovieReviewSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SlovakMovieReviewSentimentClassification", + description="User reviews of movies on the CSFD movie database, with 2 sentiment classes (positive, negative)", + reference="https://arxiv.org/pdf/2304.01922", + dataset={ + "path": "janko/sk_csfd-movie-reviews", + "revision": "0c47583c9d339b3b6f89e4db76088af5f1ec8d39", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["slk-Latn"], + main_score="accuracy", + date=("2002-05-21", "2020-03-05"), + form=["written"], + dialect=[], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="CC BY-NC-SA 4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + text_creation="found", + bibtex_citation=""" + @article{vstefanik2023resources, + title={Resources and Few-shot Learners for In-context Learning in Slavic Languages}, + author={{\v{S}}tef{\'a}nik, Michal and Kadl{\v{c}}{\'\i}k, Marek and Gramacki, Piotr and Sojka, Petr}, + journal={arXiv preprint arXiv:2304.01922}, + year={2023} + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 366.17}, + ) + + def dataset_transform(self) -> None: + self.dataset = self.dataset.rename_columns({"comment": "text"}) + + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/svk/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/svk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/DalajClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/DalajClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..8aced2907a580324f0920fd3b6367b7689b6f8ff --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/DalajClassification.py @@ -0,0 +1,73 @@ +# SuperLIM tasks +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class DalajClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="DalajClassification", + dataset={ + "path": "AI-Sweden/SuperLim", + "revision": "7ebf0b4caa7b2ae39698a889de782c09e6f5ee56", + "name": "dalaj", + }, + description="A Swedish dataset for linguistic acceptability.
Available as a part of Superlim.", + reference="https://spraakbanken.gu.se/en/resources/superlim", + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["swe-Latn"], + main_score="accuracy", + date=("2017-01-01", "2020-12-31"), + form=["written"], + domains=["Non-fiction"], + task_subtypes=["Linguistic acceptability"], + license="CC-BY-4.0", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + text_creation="created", + bibtex_citation="""@misc{2105.06681, +Author = {Elena Volodina and Yousuf Ali Mohammed and Julia Klezl}, +Title = {DaLAJ - a dataset for linguistic acceptability judgments for Swedish: Format, baseline, sharing}, +Year = {2021}, +Eprint = {arXiv:2105.06681}, +}""", + n_samples={"test": 444}, + avg_character_length={"test": 243.8}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 16 + return metadata_dict + + def dataset_transform(self): + """This dataset consists of two relevant columns, "original_sentence" and "corrected_sentence". + We use the original sentence as the "wrong" sentence and the corrected sentence as the "correct" sentence. + """ + + def __convert_sample_to_classification(sample): + text = sample["original_sentence"] + sample["corrected_sentence"] + label = [1] * len(sample["original_sentence"]) + [0] * len( + sample["corrected_sentence"] + ) + return {"text": text, "label": label} + + columns_to_keep = ["original_sentence", "corrected_sentence"] + for split in self.dataset: + columns_names = self.dataset[split].column_names # type: ignore + columns_to_remove = [ + col for col in columns_names if col not in columns_to_keep + ] + self.dataset[split] = self.dataset[split].remove_columns(columns_to_remove) # type: ignore + + self.dataset = self.dataset.map( + __convert_sample_to_classification, + batched=True, + remove_columns=columns_to_keep, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/SweRecClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/SweRecClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b9335429eb70bfaaacfb54bc33816df4f42340 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/SweRecClassification.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class SweRecClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SweRecClassification", + description="A Swedish dataset for sentiment classification on reviews", + reference="https://aclanthology.org/2023.nodalida-1.20/", + dataset={ + "path": "mteb/swerec_classification", + "revision": "b07c6ce548f6a7ac8d546e1bbe197a0086409190", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["swe-Latn"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 1024}, + avg_character_length={"test": 318.8}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/SwedishSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/SwedishSentimentClassification.py new file
mode 100644 index 0000000000000000000000000000000000000000..d06c07ad966b8bc4a34dff9f04fb8f20ca0c405d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/SwedishSentimentClassification.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 1024 + + +class SwedishSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="SwedishSentimentClassification", + description="Dataset of Swedish reviews scraped from various publicly available websites", + reference="https://huggingface.co/datasets/swedish_reviews", + dataset={ + "path": "timpal0l/swedish_reviews", + "revision": "105ba6b3cb99b9fd64880215be469d60ebf44a1b", + }, + type="Classification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["swe-Latn"], + main_score="accuracy", + date=("2021-01-01", "2022-01-01"), + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"validation": N_SAMPLES, "test": N_SAMPLES}, + avg_character_length={"validation": 499.3, "test": 498.1}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["validation", "test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/swe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tam/TamilNewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tam/TamilNewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..d782d5bfc4dda669832d747874568b9e532efae3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tam/TamilNewsClassification.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TamilNewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TamilNewsClassification", + description="A Tamil dataset for 6-class classification of Tamil news articles", + reference="https://github.com/vanangamudi/tamil-news-classification", + dataset={ + "path": "mlexplorer008/tamil_news_classification", + "revision": "bb34dd6690cf17aa731d75d45388c5801b8c4e4b", + }, + type="Classification", + category="s2s", + date=("2014-01-01", "2018-01-01"), + eval_splits=["test"], + eval_langs=["tam-Taml"], + main_score="f1", + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="MIT", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=None, + n_samples={"train": 14521, "test": 3631}, + avg_character_length={"train": 56.50, "test": 56.52}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"NewsInTamil": "text", "Category": "label"} + ) + self.dataset = self.stratified_subsampling(self.dataset, seed=self.seed) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tam/__init__.py
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tam/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tel/TeluguAndhraJyotiNewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tel/TeluguAndhraJyotiNewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..915e890dcc058bbfa0d52e4afbf506a69f64a1cb --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tel/TeluguAndhraJyotiNewsClassification.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TeluguAndhraJyotiNewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TeluguAndhraJyotiNewsClassification", + description="A Telugu dataset for 5-class classification of Telugu news articles", + reference="https://github.com/AnushaMotamarri/Telugu-Newspaper-Article-Dataset", + dataset={ + "path": "mlexplorer008/telugu_news_classification", + "revision": "3821aa93aa461c9263071e0897234e8d775ad616", + }, + type="Classification", + category="s2s", + date=("2014-01-01", "2018-01-01"), + eval_splits=["test"], + eval_langs=["tel-Telu"], + main_score="f1", + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="MIT", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": 4329}, + avg_character_length={"test": 1428.28}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns({"body": "text", "topic": "label"}) + self.dataset = self.stratified_subsampling(self.dataset, seed=self.seed) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tha/WisesightSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tha/WisesightSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..bc9fdee056d9c8ba4b6035342628adbf24d06d1c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tha/WisesightSentimentClassification.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class WisesightSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="WisesightSentimentClassification", + description="Wisesight Sentiment Corpus: Social media messages in the Thai language with sentiment labels (positive, neutral, negative, question)", + reference="https://github.com/PyThaiNLP/wisesight-sentiment", + dataset={ + "path": "pythainlp/wisesight_sentiment", + "revision": "14aa5773afa135ba835cc5179bbc4a63657a42ae", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["tha-Thai"], + main_score="f1", + date=("2019-05-24", "2021-09-16"), + form=["written"], + dialect=[], + domains=["Social", "News"], + task_subtypes=["Sentiment/Hate speech"], + license="cc0-1.0", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + text_creation="found", + bibtex_citation="""@software{bact_2019_3457447, + author = {Suriyawongkul, Arthit and + Chuangsuwanich, Ekapol and + Chormai, Pattarawat and + Polpanumas, Charin}, + title =
{PyThaiNLP/wisesight-sentiment: First release}, + month = sep, + year = 2019, + publisher = {Zenodo}, + version = {v1.0}, + doi = {10.5281/zenodo.3457447}, + url = {https://doi.org/10.5281/zenodo.3457447} +} + +""", + n_samples={"train": 2048}, + avg_character_length={"train": 103.42}, + ) + + def dataset_transform(self): + for split in self.dataset.keys(): + self.dataset[split] = self.dataset[split].rename_column("texts", "text") + self.dataset[split] = self.dataset[split].rename_column("category", "label") + + self.dataset = self.stratified_subsampling( + self.dataset, + seed=self.seed, + splits=["test"], + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tha/WongnaiReviewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tha/WongnaiReviewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..43dc95a0fc2fbdfc3a5cfbf6dd6c6afae46d9109 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tha/WongnaiReviewsClassification.py @@ -0,0 +1,49 @@ +from mteb.abstasks.AbsTaskClassification import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class WongnaiReviewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="WongnaiReviewsClassification", + description="Wongnai features over 200,000 restaurants, beauty salons, and spas across Thailand on its platform, with detailed information about each merchant and user reviews. In this dataset there are 5 classes corresponding to each star rating", + reference="https://github.com/wongnai/wongnai-corpus", + dataset={ + "path": "wongnai_reviews", + "revision": "e708d4545d7ab10dd2c6b5b5b2a72ca28685dae2", + }, + type="Classification", + category="p2p", + eval_splits=["test"], + eval_langs=["tha-Thai"], + main_score="accuracy", + date=("2018-01-01", "2018-12-31"), + form=["written"], + dialect=[], + domains=["Reviews"], + task_subtypes=[], + license="LGPL-3.0", + socioeconomic_status="mixed", + annotations_creators="derived", + text_creation="found", + bibtex_citation=""" + @software{cstorm125_2020_3852912, + author = {cstorm125 and lukkiddd}, + title = {PyThaiNLP/classification-benchmarks: v0.1-alpha}, + month = may, + year = 2020, + publisher = {Zenodo}, + version = {v0.1-alpha}, + doi = {10.5281/zenodo.3852912}, + url = {https://doi.org/10.5281/zenodo.3852912} + }""", + n_samples={"test": 2048}, + avg_character_length={"test": 540.3717}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"review_body": "text", "star_rating": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tha/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tha/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tsn/TswanaNewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tsn/TswanaNewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..8d07cc3760158f2051c2b452e514a11d797850ac --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tsn/TswanaNewsClassification.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from
mteb.abstasks.TaskMetadata import TaskMetadata + + +class TswanaNewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TswanaNewsClassification", + description="Tswana News Classification Dataset", + reference="https://link.springer.com/chapter/10.1007/978-3-031-49002-6_17", + dataset={ + "path": "dsfsi/daily-news-dikgang", + "revision": "061ca1525717eebaaa9bada240f6cbb31eb3aa87", + }, + type="Classification", + task_subtypes=["Topic classification"], + category="s2s", + eval_splits=["test"], + eval_langs=["tsn-Latn"], + main_score="accuracy", + date=("2015-01-01", "2023-01-01"), + form=["written"], + domains=["News"], + license="CC-BY-SA-4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{marivate2023puoberta, + title = {PuoBERTa: Training and evaluation of a curated language model for Setswana}, + author = {Vukosi Marivate and Moseli Mots'Oehli and Valencia Wagner and Richard Lastrucci and Isheanesu Dzingirai}, + year = {2023}, + booktitle= {SACAIR 2023 (To Appear)}, + keywords = {NLP}, + preprint_url = {https://arxiv.org/abs/2310.09141}, + dataset_url = {https://github.com/dsfsi/PuoBERTa}, + software_url = {https://huggingface.co/dsfsi/PuoBERTa} + } + """, + n_samples={"validation": 487, "test": 487}, + avg_character_length={"validation": 2417.72, "test": 2369.52}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tsn/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tsn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/TurkishMovieSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/TurkishMovieSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..aea1703cfb97d9ced0f469547b7a6bdcb7031ce0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/TurkishMovieSentimentClassification.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TurkishMovieSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TurkishMovieSentimentClassification", + description="Turkish Movie Review Dataset", + reference="https://www.win.tue.nl/~mpechen/publications/pubs/MT_WISDOM2013.pdf", + dataset={ + "path": "asparius/Turkish-Movie-Review", + "revision": "409a4415cce5f6bcfca6d5f3ca3c408211ca00b3", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["tur-Latn"], + main_score="accuracy", + date=("2013-01-01", "2013-08-11"), + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{Demirtas2013CrosslingualPD, + title={Cross-lingual polarity detection with machine translation}, + author={Erkin Demirtas and Mykola Pechenizkiy}, + booktitle={wisdom}, + year={2013}, + url={https://api.semanticscholar.org/CorpusID:3912960} + } + """, + n_samples={"test": 2644}, + avg_character_length={"test": 141.50}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, 
splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/TurkishProductSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/TurkishProductSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..00f09fd9036c76910a71ab7761ecc77ce7bfe223 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/TurkishProductSentimentClassification.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TurkishProductSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="TurkishProductSentimentClassification", + description="Turkish Product Review Dataset", + reference="https://www.win.tue.nl/~mpechen/publications/pubs/MT_WISDOM2013.pdf", + dataset={ + "path": "asparius/Turkish-Product-Review", + "revision": "ad861e463abda351ff65ca5ac0cc5985afe9eb99", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["tur-Latn"], + main_score="accuracy", + date=("2013-01-01", "2013-08-11"), + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{Demirtas2013CrosslingualPD, + title={Cross-lingual polarity detection with machine translation}, + author={Erkin Demirtas and Mykola Pechenizkiy}, + booktitle={wisdom}, + year={2013}, + url={https://api.semanticscholar.org/CorpusID:3912960} + } + """, + n_samples={"test": 800}, + avg_character_length={"test": 246.85}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/tur/__init__.py @@ -0,0 +1 @@ + diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ukr/UkrFormalityClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ukr/UkrFormalityClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..a230905be55f58d8d1ae3ce1bddb6ec8a1c45d13 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ukr/UkrFormalityClassification.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class UkrFormalityClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="UkrFormalityClassification", + description=""" + This dataset contains Ukrainian Formality Classification dataset obtained by + trainslating English GYAFC data. + English data source: https://aclanthology.org/N18-1012/ + Translation into Ukrainian language using model: https://huggingface.co/facebook/nllb-200-distilled-600M + Additionally, the dataset was balanced, witha labels: 0 - informal, 1 - formal. 
+ """, + dataset={ + "path": "ukr-detect/ukr-formality-dataset-translated-gyafc", + "revision": "671d1e6bbf45a74ef21af351fd4ef7b32b7856f8", + }, + reference="https://huggingface.co/datasets/ukr-detect/ukr-formality-dataset-translated-gyafc", + type="Classification", + category="s2s", + eval_splits=["train", "test"], + eval_langs=["ukr-Cyrl"], + main_score="accuracy", + date=("2018-04-11", "2018-06-20"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="openrail++", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="machine-translated", + bibtex_citation="""@inproceedings{rao-tetreault-2018-dear, + title = "Dear Sir or Madam, May {I} Introduce the {GYAFC} Dataset: Corpus, Benchmarks and Metrics for Formality Style Transfer", + author = "Rao, Sudha and + Tetreault, Joel", + booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)", + month = jun, + year = "2018", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/N18-1012", + }""", + n_samples={"train": 2048, "test": 2048}, + avg_character_length={"train": 52.10, "test": 53.07}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_column("labels", "label") + self.dataset = self.dataset.class_encode_column("label") + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["train", "test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ukr/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ukr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/urd/UrduRomanSentimentClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/urd/UrduRomanSentimentClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..c7899693456d024f06d0e8568375f02ef59d934f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/urd/UrduRomanSentimentClassification.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class UrduRomanSentimentClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="UrduRomanSentimentClassification", + description="The Roman Urdu dataset is a data corpus comprising of more than 20000 records tagged for sentiment (Positive, Negative, Neutral)", + reference="https://archive.ics.uci.edu/dataset/458/roman+urdu+data+set", + dataset={ + "path": "roman_urdu", + "revision": "566be6449bb30b9b9f2b59173391647fe0ca3224", + }, + type="Classification", + category="s2s", + date=("2018-01-01", "2018-08-28"), + eval_splits=["train"], + eval_langs=["urd-Latn"], + main_score="f1", + form=["written"], + domains=["Social"], + task_subtypes=["Sentiment/Hate speech"], + license="MIT", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{misc_roman_urdu_data_set_458, + author = {Sharf,Zareen}, + title = {{Roman Urdu Data Set}}, + year = {2018}, + howpublished = {UCI Machine Learning Repository}, + note = {{DOI}: https://doi.org/10.24432/C58325} +} + """, + n_samples={"train": 2048}, + 
avg_character_length={"train": 68.248}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"sentence": "text", "sentiment": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["train"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/urd/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/urd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/vie/VieStudentFeedbackClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/vie/VieStudentFeedbackClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..137ac84cff4e8e4211dc9e2aed3eb4bfecc167b1 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/vie/VieStudentFeedbackClassification.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +TEST_SAMPLES = 2048 + + +class VieStudentFeedbackClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="VieStudentFeedbackClassification", + description="A Vietnamese dataset for classification of student feedback", + reference="https://ieeexplore.ieee.org/document/8573337", + dataset={ + "path": "uitnlp/vietnamese_students_feedback", + "revision": "7b56c6cb1c9c8523249f407044c838660df3811a", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["vie-Latn"], + main_score="accuracy", + date=("2021-12-26", "2021-12-26"), + form=["written"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="MIT", + socioeconomic_status="medium", + annotations_creators="human-annotated", + dialect=[], + text_creation="created", + bibtex_citation="""@InProceedings{8573337, + author={Nguyen, Kiet Van and Nguyen, Vu Duc and Nguyen, Phu X. V. and Truong, Tham T. H. 
and Nguyen, Ngan Luu-Thuy}, + booktitle={2018 10th International Conference on Knowledge and Systems Engineering (KSE)}, + title={UIT-VSFC: Vietnamese Students’ Feedback Corpus for Sentiment Analysis}, + year={2018}, + volume={}, + number={}, + pages={19-24}, + doi={10.1109/KSE.2018.8573337} +}""", + n_samples={"test": TEST_SAMPLES}, + avg_character_length={"test": 14.22}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"sentence": "text", "sentiment": "label"} + ) + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/vie/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/vie/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zho/CMTEBClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zho/CMTEBClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..d4aead87eb97a9292b01fea3e65792050da27e8c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zho/CMTEBClassification.py @@ -0,0 +1,217 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClassification + + +class TNews(AbsTaskClassification): + metadata = TaskMetadata( + name="TNews", + description="Short text classification for news", + reference="https://www.cluebenchmarks.com/introduce.html", + dataset={ + "path": "C-MTEB/TNews-classification", + "revision": "317f262bf1e6126357bbe89e875451e4b0938fe4", + }, + type="Classification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["cmn-Hans"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["samples_per_label"] = 32 + return metadata_dict + + +class IFlyTek(AbsTaskClassification): + metadata = TaskMetadata( + name="IFlyTek", + description="Long text classification for the description of apps", + reference="https://www.cluebenchmarks.com/introduce.html", + dataset={ + "path": "C-MTEB/IFlyTek-classification", + "revision": "421605374b29664c5fc098418fe20ada9bd55f8a", + }, + type="Classification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["cmn-Hans"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["samples_per_label"] = 32 + metadata_dict["n_experiments"] = 5 + return metadata_dict + + +class MultilingualSentiment(AbsTaskClassification): + metadata = TaskMetadata( + name="MultilingualSentiment", + description="A collection of multilingual sentiment datasets grouped into 3 classes -- positive, neutral, negative", + reference="https://github.com/tyqiangz/multilingual-sentiment-datasets", +
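# Only the Chinese portion of this collection is evaluated here (see eval_langs below). +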
dataset={ + "path": "C-MTEB/MultilingualSentiment-classification", + "revision": "46958b007a63fdbf239b7672c25d0bea67b5ea1a", + }, + type="Classification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["cmn-Hans"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["samples_per_label"] = 32 + return metadata_dict + + +class JDReview(AbsTaskClassification): + metadata = TaskMetadata( + name="JDReview", + description="User reviews of iPhone products on JD.com", + reference="https://aclanthology.org/2023.nodalida-1.20/", + dataset={ + "path": "C-MTEB/JDReview-classification", + "revision": "b7c64bd89eb87f8ded463478346f76731f07bf8b", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["samples_per_label"] = 32 + return metadata_dict + + +class OnlineShopping(AbsTaskClassification): + metadata = TaskMetadata( + name="OnlineShopping", + description="Sentiment Analysis of User Reviews on Online Shopping Websites", + reference="https://aclanthology.org/2023.nodalida-1.20/", + dataset={ + "path": "C-MTEB/OnlineShopping-classification", + "revision": "e610f2ebd179a8fda30ae534c3878750a96db120", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["samples_per_label"] = 32 + return metadata_dict + + +class Waimai(AbsTaskClassification): + metadata = TaskMetadata( + name="Waimai", + description="Sentiment Analysis of user reviews on takeaway platforms", + reference="https://aclanthology.org/2023.nodalida-1.20/", + dataset={ + "path": "C-MTEB/waimai-classification", + "revision": "339287def212450dcaa9df8c22bf93e9980c7023", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["samples_per_label"] = 32 + + return metadata_dict diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zho/YueOpenriceReviewClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zho/YueOpenriceReviewClassification.py new file mode 100644 index
0000000000000000000000000000000000000000..bc760e0308998501bb7d2bd3e7d743510e5d98ff --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zho/YueOpenriceReviewClassification.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class YueOpenriceReviewClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="YueOpenriceReviewClassification", + description="A Cantonese dataset for review classification", + reference="https://github.com/Christainx/Dataset_Cantonese_Openrice", + dataset={ + "path": "izhx/yue-openrice-review", + "revision": "1300d045cf983bac23faadf3aa12a619624769da", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["yue-Hant"], + main_score="accuracy", + date=("2019-01-01", "2019-05-01"), + form=["spoken"], + domains=["Reviews"], + task_subtypes=["Sentiment/Hate speech"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{xiang2019sentiment, + title={Sentiment Augmented Attention Network for Cantonese Restaurant Review Analysis}, + author={Xiang, Rong and Jiao, Ying and Lu, Qin}, + booktitle={Proceedings of the 8th KDD Workshop on Issues of Sentiment Discovery and Opinion Mining (WISDOM)}, + pages={1--9}, + year={2019}, + organization={KDD WISDOM} +}""", + n_samples={"test": 6161}, + avg_character_length={"test": 173.0}, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["n_experiments"] = 10 + metadata_dict["samples_per_label"] = 32 + return metadata_dict + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"] + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zho/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zho/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zul/IsiZuluNewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zul/IsiZuluNewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..7772fb45c5b803613c14885a00b49c51d9a450b3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zul/IsiZuluNewsClassification.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 2800 + + +class IsiZuluNewsClassification(AbsTaskClassification): + metadata = TaskMetadata( + name="IsiZuluNewsClassification", + description="isiZulu News Classification Dataset", + reference="https://huggingface.co/datasets/dsfsi/za-isizulu-siswati-news", + dataset={ + "path": "isaacchung/isizulu-news", + "revision": "55caf0e52693a1ea63b15a4980a73fc137fb862b", + }, + type="Classification", + category="s2s", + eval_splits=["train"], + eval_langs=["zul-Latn"], + main_score="accuracy", + date=("2022-08-01", "2022-08-01"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="CC-BY-SA-4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + 
bibtex_citation="""@article{Madodonga_Marivate_Adendorff_2023, title={Izindaba-Tindzaba: Machine learning news categorisation for Long and Short Text for isiZulu and Siswati}, volume={4}, url={https://upjournals.up.ac.za/index.php/dhasa/article/view/4449}, DOI={10.55492/dhasa.v4i01.4449}, author={Madodonga, Andani and Marivate, Vukosi and Adendorff, Matthew}, year={2023}, month={Jan.} } + """, + n_samples={"train": 752}, + avg_character_length={"train": 43.1}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns({"title": "text"}) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zul/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/zul/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9e651fd47c65f6b2c3c6620d3f5ec712cad22773 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/__init__.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from .deu.BlurbsClusteringP2P import * +from .deu.BlurbsClusteringS2S import * +from .deu.TenKGnadClusteringP2P import * +from .deu.TenKGnadClusteringS2S import * +from .eng.ArxivClusteringP2P import * +from .eng.ArxivClusteringS2S import * +from .eng.ArXivHierarchicalClustering import * +from .eng.BigPatentClustering import * +from .eng.BiorxivClusteringP2P import * +from .eng.BiorxivClusteringS2S import * +from .eng.MedrxivClusteringP2P import * +from .eng.MedrxivClusteringS2S import * +from .eng.RedditClustering import * +from .eng.RedditClusteringP2P import * +from .eng.StackExchangeClustering import * +from .eng.StackExchangeClusteringP2P import * +from .eng.TwentyNewsgroupsClustering import * +from .eng.WikiCitiesClustering import * +from .fra.AlloProfClusteringP2P import * +from .fra.AlloProfClusteringS2S import * +from .fra.HALClusteringS2S import * +from .jpn.LivedoorNewsClustering import * +from .jpn.MewsC16JaClustering import * +from .multilingual.IndicReviewsClusteringP2P import * +from .multilingual.MasakhaNEWSClusteringP2P import * +from .multilingual.MasakhaNEWSClusteringS2S import * +from .multilingual.MLSUMClusteringP2P import * +from .multilingual.MLSUMClusteringS2S import * +from .multilingual.SIB200ClusteringS2S import * +from .multilingual.WikiClusteringP2P import * +from .nob.snl_clustering import * +from .nob.SNLHierarchicalClustering import * +from .nob.vg_clustering import * +from .nob.VGHierarchicalClustering import * +from .pol.PolishClustering import * +from .rom.RomaniBibleClustering import * +from .spa.SpanishNewsClusteringP2P import * +from .swe.swedn_clustering import * +from .swe.SwednClustering import * +from .zho.CMTEBClustering import * diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/BlurbsClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/BlurbsClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..2a9c6aacafb9ca155fad7f51d59f8d3e46495bfe --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/BlurbsClusteringP2P.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import itertools + +import numpy as np +from datasets import Dataset, DatasetDict + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering 
+from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +NUM_SAMPLES = 2048 + + +class BlurbsClusteringP2P(AbsTaskClustering): + superseeded_by = "BlurbsClusteringP2P.v2" + + metadata = TaskMetadata( + name="BlurbsClusteringP2P", + description="Clustering of book titles+blurbs. Clustering of 28 sets, either on the main or secondary genre.", + reference="https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc.html", + dataset={ + "path": "slvnwhrl/blurbs-clustering-p2p", + "revision": "a2dd5b02a77de3466a3eaa98ae586b5610314496", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 174637}, + avg_character_length={"test": 664.09}, + ) + + +class BlurbsClusteringP2PFast(AbsTaskClusteringFast): + # A faster version of BlurbsClusteringP2P. Since the clusters are not sampled from a single distribution, we cannot reuse the + # default AbsTaskClusteringFast subsampling; instead we simply downsample each cluster. + + metadata = TaskMetadata( + name="BlurbsClusteringP2P.v2", + description="Clustering of book titles+blurbs. Clustering of 28 sets, either on the main or secondary genre.", + reference="https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc.html", + dataset={ + "path": "slvnwhrl/blurbs-clustering-p2p", + "revision": "a2dd5b02a77de3466a3eaa98ae586b5610314496", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="v_measure", + date=( + "1900-01-01", + "2019-12-31", + ), # since it is books it is likely to be from the 20th century -> paper from 2019 + form=["written"], + domains=["Fiction"], + task_subtypes=["Thematic clustering"], + license="cc-by-nc-4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{Remus2019GermEval2T, + title={GermEval 2019 Task 1: Hierarchical Classification of Blurbs}, + author={Steffen Remus and Rami Aly and Chris Biemann}, + booktitle={Conference on Natural Language Processing}, + year={2019}, + url={https://api.semanticscholar.org/CorpusID:208334484} +}""", + n_samples={"test": NUM_SAMPLES}, + avg_character_length={"test": 664.09}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + + # Remove sentences and labels with only 1 label example.
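+ # (Assumption: singletons are dropped because the stratified subsampling below needs at least two examples per label.)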
+ unique_labels, counts = np.unique(labels, return_counts=True) + solo_label_idx = np.where(counts == 1) + solo_labels = unique_labels[solo_label_idx] + for solo_label in solo_labels: + loc = labels.index(solo_label) + labels.pop(loc) + sentences.pop(loc) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=NUM_SAMPLES, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/BlurbsClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/BlurbsClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..a02076f0ea28f6872ffbf3f96afe600a2a69800d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/BlurbsClusteringS2S.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import itertools + +import numpy as np +from datasets import Dataset, DatasetDict + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +NUM_SAMPLES = 2048 + + +class BlurbsClusteringS2S(AbsTaskClustering): + superseeded_by = "BlurbsClusteringS2S.v2" + + metadata = TaskMetadata( + name="BlurbsClusteringS2S", + description="Clustering of book titles. Clustering of 28 sets, either on the main or secondary genre.", + reference="https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc.html", + dataset={ + "path": "slvnwhrl/blurbs-clustering-s2s", + "revision": "22793b6a6465bf00120ad525e38c51210858132c", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 174637}, + avg_character_length={"test": 23.02}, + ) + + +class BlurbsClusteringS2SFast(AbsTaskClusteringFast): + # A faster version of BlurbsClusteringS2S. Since the clusters are not sampled from a single distribution, we cannot reuse the + # default AbsTaskClusteringFast subsampling; instead we simply downsample each cluster. + + metadata = TaskMetadata( + name="BlurbsClusteringS2S.v2", + description="Clustering of book titles.
Clustering of 28 sets, either on the main or secondary genre.", + reference="https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc.html", + dataset={ + "path": "slvnwhrl/blurbs-clustering-s2s", + "revision": "22793b6a6465bf00120ad525e38c51210858132c", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="v_measure", + date=( + "1900-01-01", + "2019-12-31", + ), # since it is books it is likely to be from the 20th century -> paper from 2019 + form=["written"], + domains=["Fiction"], + task_subtypes=["Thematic clustering"], + license="cc-by-nc-4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{Remus2019GermEval2T, + title={GermEval 2019 Task 1: Hierarchical Classification of Blurbs}, + author={Steffen Remus and Rami Aly and Chris Biemann}, + booktitle={Conference on Natural Language Processing}, + year={2019}, + url={https://api.semanticscholar.org/CorpusID:208334484} +}""", + n_samples={"test": NUM_SAMPLES}, + avg_character_length={"test": 23.02}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + + # Remove sentences and labels with only 1 label example. + unique_labels, counts = np.unique(labels, return_counts=True) + solo_label_idx = np.where(counts == 1) + solo_labels = unique_labels[solo_label_idx] + for solo_label in solo_labels: + loc = labels.index(solo_label) + labels.pop(loc) + sentences.pop(loc) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=NUM_SAMPLES, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/TenKGnadClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/TenKGnadClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..c63e72d3afd03988c628744847cfbdf9fc0e1cea --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/TenKGnadClusteringP2P.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast, convert_to_fast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TenKGnadClusteringP2P(AbsTaskClustering): + superseeded_by = "TenKGnadClusteringP2P.v2" + + metadata = TaskMetadata( + name="TenKGnadClusteringP2P", + description="Clustering of news article titles+subheadings+texts. 
Clustering of 10 splits on the news article category.", + reference="https://tblock.github.io/10kGNAD/", + dataset={ + "path": "slvnwhrl/tenkgnad-clustering-p2p", + "revision": "5c59e41555244b7e45c9a6be2d720ab4bafae558", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 45914}, + avg_character_length={"test": 2641.03}, + ) + + +class TenKGnadClusteringP2PFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="TenKGnadClusteringP2P.v2", + description="Clustering of news article titles+subheadings+texts. Clustering of 10 splits on the news article category.", + reference="https://tblock.github.io/10kGNAD/", + dataset={ + "path": "slvnwhrl/tenkgnad-clustering-p2p", + "revision": "5c59e41555244b7e45c9a6be2d720ab4bafae558", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="v_measure", + date=( + "2000-01-01", + "2020-12-31", + ), # since it is news it is guessed that it is from 2000 to 2020 + form=["written"], + domains=["News", "Non-fiction"], + task_subtypes=None, + license="cc-by-sa-4.0", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=None, # none found + n_samples={"test": 10275}, # due to duplicates + avg_character_length={"test": 2641.03}, + ) + + def dataset_transform(self) -> None: + ds = convert_to_fast(self.dataset, self.seed) # type: ignore + self.dataset = ds diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/TenKGnadClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/TenKGnadClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..6edfd876a12e12e14cbf38b07b06d9029cee49cf --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/TenKGnadClusteringS2S.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast, convert_to_fast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TenKGnadClusteringS2S(AbsTaskClustering): + superseeded_by = "TenKGnadClusteringS2S.v2" + + metadata = TaskMetadata( + name="TenKGnadClusteringS2S", + description="Clustering of news article titles. Clustering of 10 splits on the news article category.", + reference="https://tblock.github.io/10kGNAD/", + dataset={ + "path": "slvnwhrl/tenkgnad-clustering-s2s", + "revision": "6cddbe003f12b9b140aec477b583ac4191f01786", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 45914}, + avg_character_length={"test": 50.96}, + ) + + +class TenKGnadClusteringS2SFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="TenKGnadClusteringS2S.v2", + description="Clustering of news article titles. 
Clustering of 10 splits on the news article category.", + reference="https://tblock.github.io/10kGNAD/", + dataset={ + "path": "slvnwhrl/tenkgnad-clustering-s2s", + "revision": "6cddbe003f12b9b140aec477b583ac4191f01786", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="v_measure", + date=( + "2000-01-01", + "2020-12-31", + ), # since it is news it is guessed that it is from 2000 to 2020 + form=["written"], + domains=["News", "Non-fiction"], + task_subtypes=None, + license="cc-by-sa-4.0", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=None, # none found + n_samples={"test": 10275}, # due to duplicates + avg_character_length={"test": 50.96}, + ) + + def dataset_transform(self) -> None: + ds = convert_to_fast(self.dataset, self.seed) # type: ignore + self.dataset = ds diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/deu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArXivHierarchicalClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArXivHierarchicalClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..b92c7306409883695b3b88d8e6288df31562165c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArXivHierarchicalClustering.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import itertools + +from datasets import Dataset, DatasetDict +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 2048 + + +def split_labels(record: dict) -> dict: + record["labels"] = record["labels"].split(".") + return record + + +class ArXivHierarchicalClusteringP2P(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="ArXivHierarchicalClusteringP2P", + description="Clustering of titles+abstract from arxiv. 
Clustering of 30 sets, either on the main or secondary category", + reference="https://www.kaggle.com/Cornell-University/arxiv", + dataset={ + "path": "mteb/arxiv-clustering-p2p", + "revision": "0bbdb47bcbe3a90093699aefeed338a0f28a7ee8", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("1991-01-01", "2021-01-01"), # 1991-01-01 is the date of the first arXiv paper + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering"], + license="CC0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="@misc{arxiv_eprint_archive, title={arXiv.org e-Print archive}, url={https://arxiv.org/}}", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 1009.98}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.dataset.map(split_labels) # split "cs.CL"-style categories into hierarchical labels + self.dataset["test"] = self.dataset["test"].train_test_split( + test_size=N_SAMPLES, seed=self.seed + )["test"] + + +class ArXivHierarchicalClusteringS2S(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="ArXivHierarchicalClusteringS2S", + description="Clustering of titles from arxiv. Clustering of 30 sets, either on the main or secondary category", + reference="https://www.kaggle.com/Cornell-University/arxiv", + dataset={ + "path": "mteb/arxiv-clustering-s2s", + "revision": "b73bd54100e5abfa6e3a23dcafb46fe4d2438dc3", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("1991-01-01", "2021-01-01"), # 1991-01-01 is the date of the first arXiv paper + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering"], + license="CC0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="@misc{arxiv_eprint_archive, title={arXiv.org e-Print archive}, url={https://arxiv.org/}}", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 1009.98}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.dataset.map(split_labels) + self.dataset["test"] = self.dataset["test"].train_test_split( + test_size=N_SAMPLES, seed=self.seed + )["test"] diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArxivClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArxivClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..bd1a29444902b6c2a88fb8567de7fefcae088b56 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArxivClusteringP2P.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import clustering_downsample +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class ArxivClusteringP2P(AbsTaskClustering): + superseeded_by = 
"ArXivHierarchicalClusteringP2P" + + metadata = TaskMetadata( + name="ArxivClusteringP2P", + description="Clustering of titles+abstract from arxiv. Clustering of 30 sets, either on the main or secondary category", + reference="https://www.kaggle.com/Cornell-University/arxiv", + dataset={ + "path": "mteb/arxiv-clustering-p2p", + "revision": "a122ad7f3f0291bf49cc6f4d32aa80929df69d5d", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 732723}, + avg_character_length={"test": 1009.98}, + ) + + +class ArxivClusteringP2PFast(AbsTaskClustering): + superseeded_by = "ArXivHierarchicalClusteringP2P" + # a faster version of the dataset, since it does not sample from the same distribution we can't use the AbsTaskClusteringFast, instead we + # simply downsample each cluster. + + metadata = TaskMetadata( + name="ArxivClusteringP2P.v2", + description="Clustering of titles+abstract from arxiv. Clustering of 30 sets, either on the main or secondary category", + reference="https://www.kaggle.com/Cornell-University/arxiv", + dataset={ + "path": "mteb/arxiv-clustering-p2p", + "revision": "a122ad7f3f0291bf49cc6f4d32aa80929df69d5d", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("1991-01-01", "2021-01-01"), # 1991-01-01 is the first arxiv paper + form=["written"], + domains=["Academic"], + task_subtypes=[], + license="CC0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=None, # None found + n_samples={"test": 250_000}, + avg_character_length={"test": 1009.98}, + ) + + def dataset_transform(self): + ds = clustering_downsample(self.dataset, self.seed) + self.dataset = ds diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArxivClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArxivClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..380f0aae0a70e6659723f0e8cafad633468ce3e3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/ArxivClusteringS2S.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskClustering import AbsTaskClustering + + +class ArxivClusteringS2S(AbsTaskClustering): + superseeded_by = "ArXivHierarchicalClusteringS2S" + metadata = TaskMetadata( + name="ArxivClusteringS2S", + description="Clustering of titles from arxiv. 
Clustering of 30 sets, either on the main or secondary category", + reference="https://www.kaggle.com/Cornell-University/arxiv", + dataset={ + "path": "mteb/arxiv-clustering-s2s", + "revision": "f910caf1a6075f7329cdf8c1a6135696f37dbd53", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 732723}, + avg_character_length={"test": 74}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BigPatentClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BigPatentClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..e0a3fe3827769efea61988c59c235854584f6565 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BigPatentClustering.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +NUM_SAMPLES = 2048 + + +class BigPatentClustering(AbsTaskClustering): + superseeded_by = "BigPatentClustering.v2" + + metadata = TaskMetadata( + name="BigPatentClustering", + description="Clustering of documents from the Big Patent dataset. Test set only includes documents " + "belonging to a single category, with a total of 9 categories.", + reference="https://www.kaggle.com/datasets/big_patent", + dataset={ + "path": "jinaai/big-patent-clustering", + "revision": "62d5330920bca426ce9d3c76ea914f15fc83e891", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + +class BigPatentClusteringFast(AbsTaskClusteringFast):
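+ # max_depth=1 presumably restricts AbsTaskClusteringFast's hierarchical clustering evaluation to the top level of the patent category labels.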
+ max_depth = 1 + metadata = TaskMetadata( + name="BigPatentClustering.v2", + description="Clustering of documents from the Big Patent dataset. Test set only includes documents " + "belonging to a single category, with a total of 9 categories.", + reference="https://huggingface.co/datasets/NortheasternUniversity/big_patent", + dataset={ + "path": "mteb/big-patent", + "revision": "58a863a958586a5d6ba51088b94ac74a46aa864f", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=( + "1971-01-01", + "2019-06-10", + ), # start date from the paper; end date is the paper's publication date + form=["written"], + domains=["Legal"], + task_subtypes=["Thematic clustering"], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{DBLP:journals/corr/abs-1906-03741, + author = {Eva Sharma and + Chen Li and + Lu Wang}, + title = {{BIGPATENT:} {A} Large-Scale Dataset for Abstractive and Coherent + Summarization}, + journal = {CoRR}, + volume = {abs/1906.03741}, + year = {2019}, + url = {http://arxiv.org/abs/1906.03741}, + eprinttype = {arXiv}, + eprint = {1906.03741}, + timestamp = {Wed, 26 Jun 2019 07:14:58 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1906-03741.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +}""", + n_samples={"test": NUM_SAMPLES}, + avg_character_length={"test": 30995.5}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=NUM_SAMPLES, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BiorxivClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BiorxivClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..eee6427686e3f08eb835b3af2e9669fd387facf0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BiorxivClusteringP2P.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class BiorxivClusteringP2PFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="BiorxivClusteringP2P.v2", + description="Clustering of titles+abstract from biorxiv across 26 categories.", + reference="https://api.biorxiv.org/", + dataset={ + "path": "mteb/biorxiv-clustering-p2p", + "revision": "f5dbc242e11dd8e24def4c4268607a49e02946dc", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2022-05-10"), + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering"], + license="https://www.biorxiv.org/content/about-biorxiv", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", + n_samples={"test": 2048}, + avg_character_length={"test": 1664.0}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=2048, + ) + + +class BiorxivClusteringP2P(AbsTaskClustering): + superseeded_by = "BiorxivClusteringP2P.v2" + metadata = TaskMetadata( + name="BiorxivClusteringP2P", + description="Clustering of titles+abstract from biorxiv. 
Clustering of 10 sets, based on the main category.", + reference="https://api.biorxiv.org/", + dataset={ + "path": "mteb/biorxiv-clustering-p2p", + "revision": "65b79d1d13f80053f67aca9498d9402c2d9f1f40", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2022-05-10"), + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering"], + license="https://www.biorxiv.org/content/about-biorxiv", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", + n_samples={"test": 75000}, + avg_character_length={"test": 1666.2}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BiorxivClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BiorxivClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..a0fc9c39a6953345fa359ce0df3d88bfb738e2a1 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/BiorxivClusteringS2S.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class BiorxivClusteringS2SFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="BiorxivClusteringS2S.v2", + description="Clustering of titles from biorxiv across 26 categories.", + reference="https://api.biorxiv.org/", + dataset={ + "path": "mteb/biorxiv-clustering-s2s", + "revision": "eb4edb10386758d274cd161093eb351381a16dbf", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2022-05-10"), + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering"], + license="https://www.biorxiv.org/content/about-biorxiv", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", + n_samples={"test": 2048}, + avg_character_length={"test": 101.7}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=2048, + ) + + +class BiorxivClusteringS2S(AbsTaskClustering): + superseeded_by = "BiorxivClusteringS2S.v2" + metadata = TaskMetadata( + name="BiorxivClusteringS2S", + description="Clustering of titles from biorxiv. 
Clustering of 10 sets, based on the main category.", + reference="https://api.biorxiv.org/", + dataset={ + "path": "mteb/biorxiv-clustering-s2s", + "revision": "258694dd0231531bc1fd9de6ceb52a0853c6d908", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2022-05-10"), + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering"], + license="https://www.biorxiv.org/content/about-biorxiv", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", + n_samples={"test": 75000}, + avg_character_length={"test": 101.6}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/MedrxivClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/MedrxivClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..da577659c238b157ae0d6713f48c79b3be1b76d8 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/MedrxivClusteringP2P.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class MedrxivClusteringP2PFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="MedrxivClusteringP2P.v2", + description="Clustering of titles+abstract from medrxiv across 51 categories.", + reference="https://api.medrxiv.org/", + dataset={ + "path": "mteb/medrxiv-clustering-p2p", + "revision": "9894e30672c61db02f10a8593519d84e2b7a1a1c", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2022-05-10"), + form=["written"], + domains=["Academic", "Medical"], + task_subtypes=["Thematic clustering"], + license="https://www.medrxiv.org/content/about-medrxiv", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", + n_samples={"test": 2048}, + avg_character_length={"test": 1984.7}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=2048, + ) + + +class MedrxivClusteringP2P(AbsTaskClustering): + superseeded_by = "MedrxivClusteringP2P.v2" + metadata = TaskMetadata( + name="MedrxivClusteringP2P", + description="Clustering of titles+abstract from medrxiv. 
Clustering of 10 sets, based on the main category.", + reference="https://api.medrxiv.org/", + dataset={ + "path": "mteb/medrxiv-clustering-p2p", + "revision": "e7a26af6f3ae46b30dde8737f02c07b1505bcc73", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2022-05-10"), + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering"], + license="https://www.medrxiv.org/content/about-medrxiv", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", + n_samples={"test": 375000}, + avg_character_length={"test": 1981.2}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/MedrxivClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/MedrxivClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..9d648edc06c7913d39fbac363a93e2003aa53cfd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/MedrxivClusteringS2S.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class MedrxivClusteringS2SFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="MedrxivClusteringS2S.v2", + description="Clustering of titles from medrxiv across 51 categories.", + reference="https://api.medrxiv.org/", + dataset={ + "path": "mteb/medrxiv-clustering-s2s", + "revision": "ec20c81676a749c0f06fb4a9397fc7e168521458", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2022-05-10"), + form=["written"], + domains=["Academic", "Medical"], + task_subtypes=["Thematic clustering"], + license="https://www.medrxiv.org/content/about-medrxiv", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", + n_samples={"test": 2048}, + avg_character_length={"test": 114.9}, + ) + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=2048, + ) + + +class MedrxivClusteringS2S(AbsTaskClustering): + superseeded_by = "MedrxivClusteringS2S.v2" + metadata = TaskMetadata( + name="MedrxivClusteringS2S", + description="Clustering of titles from medrxiv. 
Clustering of 10 sets, based on the main category.", + reference="https://api.medrxiv.org/", + dataset={ + "path": "mteb/medrxiv-clustering-s2s", + "revision": "35191c8c0dca72d8ff3efcd72aa802307d469663", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2022-05-10"), + form=["written"], + domains=["Academic", "Medical"], + task_subtypes=["Thematic clustering"], + license="https://www.medrxiv.org/content/about-medrxiv", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", + n_samples={"test": 375000}, + avg_character_length={"test": 114.7}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/RedditClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/RedditClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..909b7c52c4048cc82fd1c8ab00e74791a0f01d7d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/RedditClustering.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +import itertools + +from datasets import Dataset, DatasetDict + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskClustering import AbsTaskClustering +from ....abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast + + +class RedditFastClusteringS2S(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="RedditFastClusteringS2S", + description="Clustering of titles from 199 subreddits. Clustering of 25 sets, each with 10-50 classes, and each class with 100 - 1000 sentences.", + reference="https://arxiv.org/abs/2104.07081", + dataset={ + "path": "mteb/reddit-clustering", + "revision": "24640382cdbf8abc73003fb0fa6d111a705499eb", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2021-04-14"), + form=["written"], + domains=["Web", "Social"], + task_subtypes=["Thematic clustering"], + license="Not specified", # derived from pushshift + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{geigle:2021:arxiv, + author = {Gregor Geigle and + Nils Reimers and + Andreas R{\"u}ckl{\'e} and + Iryna Gurevych}, + title = {TWEAC: Transformer with Extendable QA Agent Classifiers}, + journal = {arXiv preprint}, + volume = {abs/2104.07081}, + year = {2021}, + url = {http://arxiv.org/abs/2104.07081}, + archivePrefix = {arXiv}, + eprint = {2104.07081} + }""", + n_samples={"test": 16000}, + avg_character_length={"test": 64.7}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=16000, + ) + + +class RedditClustering(AbsTaskClustering): + superseeded_by = "RedditFastClusteringS2S" + metadata = TaskMetadata( + name="RedditClustering", + description="Clustering of titles from 199 subreddits. 
Clustering of 25 sets, each with 10-50 classes, and each class with 100 - 1000 sentences.", + reference="https://arxiv.org/abs/2104.07081", + dataset={ + "path": "mteb/reddit-clustering", + "revision": "24640382cdbf8abc73003fb0fa6d111a705499eb", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 420464}, + avg_character_length={"test": 64.7}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/RedditClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/RedditClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..ffd818bde81a21333c5b0b1cfd1d72033e5a02ae --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/RedditClusteringP2P.py @@ -0,0 +1,108 @@ +from __future__ import annotations + +import itertools + +import numpy as np +from datasets import Dataset, DatasetDict + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskClustering import AbsTaskClustering +from ....abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast + + +class RedditClusteringP2P(AbsTaskClustering): + metadata = TaskMetadata( + name="RedditClusteringP2P", + description="Clustering of title+posts from reddit. Clustering of 10 sets of 50k paragraphs and 40 sets of 10k paragraphs.", + reference="https://arxiv.org/abs/2104.07081", + dataset={ + "path": "mteb/reddit-clustering-p2p", + "revision": "385e3cb46b4cfa89021f56c4380204149d0efe33", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 459399}, + avg_character_length={"test": 727.7}, + ) + + +class RedditFastClusteringP2P(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="RedditFastClusteringP2P", + description="Clustering of title+posts from reddit. 
Clustering of 10 sets of 50k paragraphs and 40 sets of 10k paragraphs.", + reference="https://arxiv.org/abs/2104.07081", + dataset={ + "path": "mteb/reddit-clustering-p2p", + "revision": "385e3cb46b4cfa89021f56c4380204149d0efe33", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2021-04-14"), + form=["written"], + domains=["Web", "Social"], + task_subtypes=["Thematic clustering"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{geigle:2021:arxiv, + author = {Gregor Geigle and + Nils Reimers and + Andreas R{\"u}ckl{\'e} and + Iryna Gurevych}, + title = {TWEAC: Transformer with Extendable QA Agent Classifiers}, + journal = {arXiv preprint}, + volume = {abs/2104.07081}, + year = {2021}, + url = {http://arxiv.org/abs/2104.07081}, + archivePrefix = {arXiv}, + eprint = {2104.07081} + }""", + n_samples={"test": 16000}, + avg_character_length={"test": 727.7}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + + # Remove sentences and labels with only 1 label example. + unique_labels, counts = np.unique(labels, return_counts=True) + solo_label_idx = np.where(counts == 1) + solo_labels = unique_labels[solo_label_idx] + for solo_label in solo_labels: + loc = labels.index(solo_label) + labels.pop(loc) + sentences.pop(loc) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=16000, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/StackExchangeClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/StackExchangeClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..d861c938fe10eaa08778ec928615df04c22f8e89 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/StackExchangeClustering.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +import itertools + +from datasets import Dataset, DatasetDict + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskClustering import AbsTaskClustering +from ....abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast + + +class StackExchangeClusteringFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="StackExchangeClustering.v2", + description="Clustering of titles from 121 stackexchanges. 
Clustering of 25 sets, each with 10-50 classes, and each class with 100 - 1000 sentences.", + reference="https://arxiv.org/abs/2104.07081", + dataset={ + "path": "mteb/stackexchange-clustering", + "revision": "6cbc1f7b2bc0622f2e39d2c77fa502909748c259", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2021-04-14"), + form=["written"], + domains=["Web"], + task_subtypes=["Thematic clustering"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{geigle:2021:arxiv, + author = {Gregor Geigle and + Nils Reimers and + Andreas R{\"u}ckl{\'e} and + Iryna Gurevych}, + title = {TWEAC: Transformer with Extendable QA Agent Classifiers}, + journal = {arXiv preprint}, + volume = {abs/2104.07081}, + year = {2021}, + url = {http://arxiv.org/abs/2104.07081}, + archivePrefix = {arXiv}, + eprint = {2104.07081} + }""", + n_samples={"test": 16000}, + avg_character_length={"test": 57.0}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=16000, + ) + + +class StackExchangeClustering(AbsTaskClustering): + superseeded_by = "StackExchangeClustering.v2" + metadata = TaskMetadata( + name="StackExchangeClustering", + description="Clustering of titles from 121 stackexchanges. Clustering of 25 sets, each with 10-50 classes, and each class with 100 - 1000 sentences.", + reference="https://arxiv.org/abs/2104.07081", + dataset={ + "path": "mteb/stackexchange-clustering", + "revision": "6cbc1f7b2bc0622f2e39d2c77fa502909748c259", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 373850}, + avg_character_length={"test": 57.0}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/StackExchangeClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/StackExchangeClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..f18ac65ed773fe74fcd1c7c5b585dd8478cec925 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/StackExchangeClusteringP2P.py @@ -0,0 +1,109 @@ +from __future__ import annotations + +import itertools + +import numpy as np +from datasets import Dataset, DatasetDict + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskClustering import AbsTaskClustering +from ....abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast + + +class StackExchangeClusteringP2PFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="StackExchangeClusteringP2P.v2", + description="Clustering of title+body from stackexchange. 
Clustering of 5 sets of 10k paragraphs and 5 sets of 5k paragraphs.", + reference="https://arxiv.org/abs/2104.07081", + dataset={ + "path": "mteb/stackexchange-clustering-p2p", + "revision": "815ca46b2622cec33ccafc3735d572c266efdb44", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("2021-01-01", "2021-04-14"), + form=["written"], + domains=["Web"], + task_subtypes=["Thematic clustering"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{geigle:2021:arxiv, + author = {Gregor Geigle and + Nils Reimers and + Andreas R{\"u}ckl{\'e} and + Iryna Gurevych}, + title = {TWEAC: Transformer with Extendable QA Agent Classifiers}, + journal = {arXiv preprint}, + volume = {abs/2104.07081}, + year = {2021}, + url = {http://arxiv.org/abs/2104.07081}, + archivePrefix = {arXiv}, + eprint = {2104.07081} + }""", + n_samples={"test": 16000}, + avg_character_length={"test": 1090.7}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + + # Remove sentences and labels with only 1 label example. + unique_labels, counts = np.unique(labels, return_counts=True) + solo_label_idx = np.where(counts == 1) + solo_labels = unique_labels[solo_label_idx] + for solo_label in solo_labels: + loc = labels.index(solo_label) + labels.pop(loc) + sentences.pop(loc) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=16000, + ) + + +class StackExchangeClusteringP2P(AbsTaskClustering): + superseeded_by = "StackExchangeClusteringP2P.v2" + metadata = TaskMetadata( + name="StackExchangeClusteringP2P", + description="Clustering of title+body from stackexchange. 
Clustering of 5 sets of 10k paragraphs and 5 sets of 5k paragraphs.", + reference="https://arxiv.org/abs/2104.07081", + dataset={ + "path": "mteb/stackexchange-clustering-p2p", + "revision": "815ca46b2622cec33ccafc3735d572c266efdb44", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 75000}, + avg_character_length={"test": 1090.7}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/TwentyNewsgroupsClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/TwentyNewsgroupsClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..4a1750bfe1574bfe3d08ffbee3bdf22269770f7f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/TwentyNewsgroupsClustering.py @@ -0,0 +1,110 @@ +from __future__ import annotations + +import itertools + +from datasets import Dataset, DatasetDict + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class TwentyNewsgroupsClustering(AbsTaskClustering): + superseeded_by = "TwentyNewsgroupsClustering.v2" + metadata = TaskMetadata( + name="TwentyNewsgroupsClustering", + description="Clustering of the 20 Newsgroups dataset (subject only).", + reference="https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html", + dataset={ + "path": "mteb/twentynewsgroups-clustering", + "revision": "6125ec4e24fa026cec8a478383ee943acfbd5449", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("1995-01-01", "1995-01-01"), + form=["written"], + domains=["News"], + task_subtypes=["Thematic clustering"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@incollection{LANG1995331, + title = {NewsWeeder: Learning to Filter Netnews}, + editor = {Armand Prieditis and Stuart Russell}, + booktitle = {Machine Learning Proceedings 1995}, + publisher = {Morgan Kaufmann}, + address = {San Francisco (CA)}, + pages = {331-339}, + year = {1995}, + isbn = {978-1-55860-377-6}, + doi = {https://doi.org/10.1016/B978-1-55860-377-6.50048-7}, + url = {https://www.sciencedirect.com/science/article/pii/B9781558603776500487}, + author = {Ken Lang}, + } + """, + n_samples={"test": 59545}, + avg_character_length={"test": 32.0}, + ) + + +class TwentyNewsgroupsClusteringFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="TwentyNewsgroupsClustering.v2", + description="Clustering of the 20 Newsgroups dataset (subject only).", + reference="https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html", + dataset={ + "path": "mteb/twentynewsgroups-clustering", + "revision": "6125ec4e24fa026cec8a478383ee943acfbd5449", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=("1995-01-01", "1995-01-01"), + form=["written"], + domains=["News"], + task_subtypes=["Thematic clustering"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found",
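+ # Same 20 Newsgroups source data and NewsWeeder citation as the superseded task above; this v2 variant subsamples the test split to 2048 examples via stratified_subsampling below.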
bibtex_citation="""@incollection{LANG1995331, + title = {NewsWeeder: Learning to Filter Netnews}, + editor = {Armand Prieditis and Stuart Russell}, + booktitle = {Machine Learning Proceedings 1995}, + publisher = {Morgan Kaufmann}, + address = {San Francisco (CA)}, + pages = {331-339}, + year = {1995}, + isbn = {978-1-55860-377-6}, + doi = {https://doi.org/10.1016/B978-1-55860-377-6.50048-7}, + url = {https://www.sciencedirect.com/science/article/pii/B9781558603776500487}, + author = {Ken Lang}, + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 32.0}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + label="labels", + n_samples=2048, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/WikiCitiesClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/WikiCitiesClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..1516633c47d00b8d510e108220692ecaea385533 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/WikiCitiesClustering.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskClustering import AbsTaskClustering + + +class WikiCitiesClustering(AbsTaskClustering): + metadata = TaskMetadata( + name="WikiCitiesClustering", + description="Clustering of Wikipedia articles of cities by country from https://huggingface.co/datasets/wikipedia. 
Test set includes 126 countries, and a total of 3531 cities.", + reference="https://huggingface.co/datasets/wikipedia", + dataset={ + "path": "jinaai/cities_wiki_clustering", + "revision": "ddc9ee9242fa65332597f70e967ecc38b9d734fa", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/eng/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/AlloProfClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/AlloProfClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..7575d540f6f768fd08592a0f9c2a94b825237cff --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/AlloProfClusteringP2P.py @@ -0,0 +1,117 @@ +import datasets +import numpy as np + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class AlloProfClusteringP2P(AbsTaskClustering): + superseeded_by = "AlloProfClusteringP2P.v2" + + metadata = TaskMetadata( + name="AlloProfClusteringP2P", + description="Clustering of document titles and descriptions from Allo Prof dataset. Clustering of 10 sets on the document topic.", + reference="https://huggingface.co/datasets/lyon-nlp/alloprof", + dataset={ + "path": "lyon-nlp/alloprof", + "revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "name": "documents", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def create_description(self, example): + example["text"] = example["title"] + " " + example["text"] + return example + + def dataset_transform(self): + """Convert to standard format""" + self.dataset = self.dataset.remove_columns("uuid") + self.dataset = self.dataset.map(self.create_description) + texts = self.dataset["documents"]["text"] + topics = self.dataset["documents"]["topic"] + new_format = { + "sentences": [split.tolist() for split in np.array_split(texts, 10)], + "labels": [split.tolist() for split in np.array_split(topics, 10)], + } + self.dataset["test"] = datasets.Dataset.from_dict(new_format) + self.dataset.pop("documents") + + +class AlloProfClusteringP2PFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="AlloProfClusteringP2P.v2", + description="Clustering of document titles and descriptions from Allo Prof dataset. 
Clustering of 10 sets on the document topic.", + reference="https://huggingface.co/datasets/lyon-nlp/alloprof", + dataset={ + "path": "lyon-nlp/alloprof", + "revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "name": "documents", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="v_measure", + # (date of founding of the dataset source site, date of dataset paper publication) + date=("1996-01-01", "2023-04-14"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Thematic clustering"], + license="mit", + socioeconomic_status="medium", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{lef23, + doi = {10.48550/ARXIV.2302.07738}, + url = {https://arxiv.org/abs/2302.07738}, + author = {Lefebvre-Brossard, Antoine and Gazaille, Stephane and Desmarais, Michel C.}, + keywords = {Computation and Language (cs.CL), Information Retrieval (cs.IR), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Alloprof: a new French question-answer education dataset and its use in an information retrieval case study}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International} +} +""", + n_samples={"test": 2556}, + avg_character_length={"test": 3539.5}, + ) + + def create_description(self, example): + example["sentences"] = example["title"] + " " + example["text"] + return example + + def dataset_transform(self): + self.dataset["test"] = ( + self.dataset["documents"] + .map(self.create_description) + .rename_columns({"topic": "labels"}) + .select_columns(["sentences", "labels"]) + ) + self.dataset.pop("documents") + unique_labels = list(set(self.dataset["test"]["labels"])) + unique_labels.sort() + self.dataset["test"] = self.dataset["test"].cast( + datasets.Features( + sentences=datasets.Value("string"), + labels=datasets.ClassLabel(names=unique_labels), + ) + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/AlloProfClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/AlloProfClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..08297e47bf47911d765532341e010385c0cac7f5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/AlloProfClusteringS2S.py @@ -0,0 +1,110 @@ +import datasets +import numpy as np + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class AlloProfClusteringS2S(AbsTaskClustering): + superseeded_by = "AlloProfClusteringS2S.v2" + + metadata = TaskMetadata( + name="AlloProfClusteringS2S", + description="Clustering of document titles from Allo Prof dataset. 
Clustering of 10 sets on the document topic.", + reference="https://huggingface.co/datasets/lyon-nlp/alloprof", + dataset={ + "path": "lyon-nlp/alloprof", + "revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "name": "documents", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def dataset_transform(self): + """Convert to standard format""" + self.dataset = self.dataset.remove_columns("uuid") + self.dataset = self.dataset.remove_columns("text") + titles = self.dataset["documents"]["title"] + topics = self.dataset["documents"]["topic"] + new_format = { + "sentences": [split.tolist() for split in np.array_split(titles, 10)], + "labels": [split.tolist() for split in np.array_split(topics, 10)], + } + self.dataset["test"] = datasets.Dataset.from_dict(new_format) + self.dataset.pop("documents") + + +class AlloProfClusteringS2SFast(AbsTaskClusteringFast): + max_depth = 1 + + metadata = TaskMetadata( + name="AlloProfClusteringS2S.v2", + description="Clustering of document titles from Allo Prof dataset. Clustering of 10 sets on the document topic.", + reference="https://huggingface.co/datasets/lyon-nlp/alloprof", + dataset={ + "path": "lyon-nlp/alloprof", + "revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "name": "documents", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="v_measure", + # (date of founding of the dataset source site, date of dataset paper publication) + date=("1996-01-01", "2023-04-14"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Thematic clustering"], + license="mit", + socioeconomic_status="medium", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{lef23, + doi = {10.48550/ARXIV.2302.07738}, + url = {https://arxiv.org/abs/2302.07738}, + author = {Lefebvre-Brossard, Antoine and Gazaille, Stephane and Desmarais, Michel C.}, + keywords = {Computation and Language (cs.CL), Information Retrieval (cs.IR), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Alloprof: a new French question-answer education dataset and its use in an information retrieval case study}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International} +} +""", + n_samples={"test": 2556}, + avg_character_length={"test": 32.8}, + ) + + def dataset_transform(self): + self.dataset["test"] = ( + self.dataset["documents"] + .rename_columns({"title": "sentences", "topic": "labels"}) + .select_columns(["sentences", "labels"]) + ) + self.dataset.pop("documents") + unique_labels = list(set(self.dataset["test"]["labels"])) + unique_labels.sort() + self.dataset["test"] = self.dataset["test"].cast( + datasets.Features( + sentences=datasets.Value("string"), + labels=datasets.ClassLabel(names=unique_labels), + ) + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/HALClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/HALClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..e6c506518bf07c8f847c4dfeac4ada5b4dbeb0b8 --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/HALClusteringS2S.py @@ -0,0 +1,113 @@ +from collections import Counter + +import datasets +import numpy as np + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +NUM_SAMPLES = 2048 + + +class HALClusteringS2S(AbsTaskClustering): + superseeded_by = "HALClusteringS2S.v2" + + metadata = TaskMetadata( + name="HALClusteringS2S", + description="Clustering of titles from HAL (https://huggingface.co/datasets/lyon-nlp/clustering-hal-s2s)", + reference="https://huggingface.co/datasets/lyon-nlp/clustering-hal-s2s", + dataset={ + "path": "lyon-nlp/clustering-hal-s2s", + "revision": "e06ebbbb123f8144bef1a5d18796f3dec9ae2915", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def dataset_transform(self): + """Convert to standard format""" + self.dataset = self.dataset.remove_columns("hal_id") + titles = self.dataset["test"]["title"] + domains = self.dataset["test"]["domain"] + new_format = { + "sentences": [split.tolist() for split in np.array_split(titles, 10)], + "labels": [split.tolist() for split in np.array_split(domains, 10)], + } + self.dataset["test"] = datasets.Dataset.from_dict(new_format) + + +class HALClusteringS2SFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="HALClusteringS2S.v2", + description="Clustering of titles from HAL (https://huggingface.co/datasets/lyon-nlp/clustering-hal-s2s)", + reference="https://huggingface.co/datasets/lyon-nlp/clustering-hal-s2s", + dataset={ + "path": "lyon-nlp/clustering-hal-s2s", + "revision": "e06ebbbb123f8144bef1a5d18796f3dec9ae2915", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="v_measure", + date=("2000-03-29", "2024-05-24"), + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering"], + license="Apache-2.0", + socioeconomic_status="medium", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": NUM_SAMPLES}, + avg_character_length={"test": 86.6}, + ) + + def dataset_transform(self): + """Convert to standard format""" + self.dataset["test"] = self.dataset["test"].remove_columns("hal_id") + self.dataset["test"] = self.dataset["test"].rename_columns( + {"title": "sentences", "domain": "labels"} + ) + labels_count = Counter(self.dataset["test"]["labels"]) + + # keep classes with more than 2 samples after stratified_subsampling + frequent_labels = set( + label + for label, count in labels_count.items() + if count > len(self.dataset["test"]) * 2 / NUM_SAMPLES + ) + self.dataset["test"] = self.dataset["test"].filter( + lambda row: row["labels"] in frequent_labels + ) + self.dataset["test"] = self.dataset["test"].cast( + datasets.Features( + sentences=datasets.Value("string"), + labels=datasets.ClassLabel(names=sorted(list(frequent_labels))), + ) + ) + + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=NUM_SAMPLES, + ) diff --git 
a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/fra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/jpn/LivedoorNewsClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/jpn/LivedoorNewsClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..265846168144f2435825a88393e94314a8868e5a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/jpn/LivedoorNewsClustering.py @@ -0,0 +1,37 @@ +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class LivedoorNewsClustering(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="LivedoorNewsClustering", + description="Clustering of the news reports of a Japanese news site, Livedoor News by RONDHUIT Co, Ltd. in 2012. It contains over 7,000 news report texts across 9 categories (topics).", + reference="https://github.com/sbintuitions/JMTEB", + dataset={ + "path": "sbintuitions/JMTEB", + "name": "livedoor_news", + "revision": "e4af6c73182bebb41d94cb336846e5a452454ea7", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["jpn-Jpan"], + main_score="v_measure", + date=("2000-01-01", "2014-02-09"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="cc-by-nd-2.1-jp", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": 1107}, + avg_character_length={"test": 1082.61}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"text": "sentences", "label": "labels"} + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/jpn/MewsC16JaClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/jpn/MewsC16JaClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..a38568c3c65270dbc007fd591e3c6e82efa61e19 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/jpn/MewsC16JaClustering.py @@ -0,0 +1,57 @@ +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class MewsC16JaClustering(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="MewsC16JaClustering", + description="""MewsC-16 (Multilingual Short Text Clustering Dataset for News in 16 languages) is constructed from Wikinews. + This dataset is the Japanese split of MewsC-16, containing topic sentences from Wikinews articles in 12 categories. 
+ More detailed information is available in Appendix E of the cited paper.""", + reference="https://github.com/sbintuitions/JMTEB", + dataset={ + "path": "sbintuitions/JMTEB", + "name": "mewsc16_ja", + "revision": "e4af6c73182bebb41d94cb336846e5a452454ea7", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["jpn-Jpan"], + main_score="v_measure", + date=("2002-01-01", "2019-01-31"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="cc-by-sa-4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{ + nishikawa-etal-2022-ease, + title = "{EASE}: Entity-Aware Contrastive Learning of Sentence Embedding", + author = "Nishikawa, Sosuke and + Ri, Ryokan and + Yamada, Ikuya and + Tsuruoka, Yoshimasa and + Echizen, Isao", + booktitle = "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", + month = jul, + year = "2022", + address = "Seattle, United States", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2022.naacl-main.284", + pages = "3870--3885", + abstract = "We present EASE, a novel method for learning sentence embeddings via contrastive learning between sentences and their related entities.The advantage of using entity supervision is twofold: (1) entities have been shown to be a strong indicator of text semantics and thus should provide rich training signals for sentence embeddings; (2) entities are defined independently of languages and thus offer useful cross-lingual alignment supervision.We evaluate EASE against other unsupervised models both in monolingual and multilingual settings.We show that EASE exhibits competitive or better performance in English semantic textual similarity (STS) and short text clustering (STC) tasks and it significantly outperforms baseline methods in multilingual settings on a variety of tasks.Our source code, pre-trained models, and newly constructed multi-lingual STC dataset are available at https://github.com/studio-ousia/ease.", + } + """, + n_samples={"test": 992}, + avg_character_length={"test": 95}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"text": "sentences", "label": "labels"} + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/jpn/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/jpn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/IndicReviewsClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/IndicReviewsClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..3584d5917a7c8a303a7aa52d6e249d7858353733 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/IndicReviewsClusteringP2P.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +from typing import Any + +import datasets +import numpy as np + +from mteb.abstasks import AbsTaskClustering, MultilingualTask +from mteb.abstasks.TaskMetadata import TaskMetadata + +_LANGUAGES = { + "as": ["asm-Beng"], + "bd": ["brx-Deva"], + "bn": ["ben-Beng"], + "gu": ["guj-Gujr"], + "hi": ["hin-Deva"], + "kn": ["kan-Knda"], + "ml": ["mal-Mlym"], + "mr": ["mar-Deva"], + "or":
["ory-Orya"], + "pa": ["pan-Guru"], + "ta": ["tam-Taml"], + "te": ["tel-Telu"], + "ur": ["urd-Arab"], +} + + +class IndicReviewsClusteringP2P(AbsTaskClustering, MultilingualTask): + metadata = TaskMetadata( + name="IndicReviewsClusteringP2P", + dataset={ + "path": "ai4bharat/IndicSentiment", + "revision": "ccb472517ce32d103bba9d4f5df121ed5a6592a4", + }, + description="Clustering of reviews from IndicSentiment dataset. Clustering of 14 sets on the generic categories label.", + reference="https://arxiv.org/abs/2212.05409", + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=_LANGUAGES, + main_score="v_measure", + date=("2022-08-01", "2022-12-20"), + form=["written"], + domains=["Reviews"], + task_subtypes=["Thematic clustering"], + license="CC0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="machine-translated and verified", + bibtex_citation="""@article{doddapaneni2022towards, + title = {Towards Leaving No Indic Language Behind: Building Monolingual Corpora, Benchmark and Models for Indic Languages}, + author = {Sumanth Doddapaneni and Rahul Aralikatte and Gowtham Ramesh and Shreyansh Goyal and Mitesh M. Khapra and Anoop Kunchukuttan and Pratyush Kumar}, + journal = {Annual Meeting of the Association for Computational Linguistics}, + year = {2022}, + doi = {10.18653/v1/2023.acl-long.693} +}""", + n_samples={"test": 1000}, + avg_character_length={"test": 137.6}, + ) + + def load_data(self, **kwargs: Any) -> None: + """Load dataset from HuggingFace hub""" + if self.data_loaded: + return + self.dataset = {} + for lang in self.hf_subsets: + self.dataset[lang] = datasets.load_dataset( + name=f"translation-{lang}", + **self.metadata_dict["dataset"], + ) + self.dataset_transform() + self.data_loaded = True + + def dataset_transform(self) -> None: + for lang in self.hf_subsets: + self.dataset[lang].pop("validation") + + texts = self.dataset[lang]["test"]["INDIC REVIEW"] + labels = self.dataset[lang]["test"]["GENERIC CATEGORIES"] + + new_format = { + "sentences": [split.tolist() for split in np.array_split(texts, 5)], + "labels": [split.tolist() for split in np.array_split(labels, 5)], + } + self.dataset[lang]["test"] = datasets.Dataset.from_dict(new_format) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MLSUMClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MLSUMClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..09621559cd4c5af19c80c2b024a4784327b59648 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MLSUMClusteringP2P.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import datasets +import numpy as np + +from mteb.abstasks import AbsTaskClustering, MultilingualTask, TaskMetadata + +_LANGUAGES = { + "de": ["deu-Latn"], + "fr": ["fra-Latn"], + "ru": ["rus-Cyrl"], + "es": ["spa-Latn"], +} +# Did not include turkish (tu) samples because all `topics` values are set to "unknown". +# Which results in a v-measure of 1 as all texts are considered to be in one cluster. + + +class MLSUMClusteringP2P(AbsTaskClustering, MultilingualTask): + metadata = TaskMetadata( + name="MLSUMClusteringP2P", + description="Clustering of newspaper article contents and titles from MLSUM dataset. 
Clustering of 10 sets on the newspaper article topics.", + reference="https://huggingface.co/datasets/mlsum", + dataset={ + "path": "reciTAL/mlsum", + "revision": "b5d54f8f3b61ae17845046286940f03c6bc79bc7", + "trust_remote_code": True, + }, + type="Clustering", + category="p2p", + eval_splits=["validation", "test"], + eval_langs=_LANGUAGES, + main_score="v_measure", + date=("2010-01-01", "2018-09-30"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{scialom2020mlsum, + title={MLSUM: The Multilingual Summarization Corpus}, + author={Scialom, Thomas and Dray, Paul-Alexis and Lamprier, Sylvain and Piwowarski, Benjamin and Staiano, Jacopo}, + journal={arXiv preprint arXiv:2004.14900}, + year={2020} + }""", + n_samples={"validation": 38561, "test": 41206}, + avg_character_length={"validation": 4613, "test": 4810}, + ) + + def load_data(self, **kwargs): + """Load dataset from HuggingFace hub and convert it to the standard format.""" + if self.data_loaded: + return + self.dataset = {} + for lang in self.hf_subsets: + self.dataset[lang] = datasets.load_dataset( + name=lang, + **self.metadata_dict["dataset"], + ) + self.dataset_transform(lang) + self.data_loaded = True + + def _create_description(self, example): + example["text"] = example["title"] + " " + example["text"] + return example + + def dataset_transform(self, lang): + """Convert to standard format""" + _dataset = self.dataset[lang] + _dataset.pop("train") + + _dataset = _dataset.map(self._create_description) + _dataset = _dataset.remove_columns(["summary", "url", "date", "title"]) + + for eval_split in self.metadata.eval_splits: + texts = _dataset[eval_split]["text"] + topics = _dataset[eval_split]["topic"] + new_format = { + "sentences": [split.tolist() for split in np.array_split(texts, 10)], + "labels": [split.tolist() for split in np.array_split(topics, 10)], + } + _dataset[eval_split] = datasets.Dataset.from_dict(new_format) + + self.dataset[lang] = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MLSUMClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MLSUMClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..34270f7e414eb1f461826ed4dc4c6d251224560c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MLSUMClusteringS2S.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import datasets +import numpy as np + +from mteb.abstasks import AbsTaskClustering, MultilingualTask, TaskMetadata + +_LANGUAGES = { + "de": ["deu-Latn"], + "fr": ["fra-Latn"], + "ru": ["rus-Cyrl"], + "es": ["spa-Latn"], +} +# Did not include Turkish (tu) samples because all `topics` values are set to "unknown", +# which results in a v-measure of 1 as all texts are considered to be in one cluster. + + +class MLSUMClusteringS2S(AbsTaskClustering, MultilingualTask): + metadata = TaskMetadata( + name="MLSUMClusteringS2S", + description="Clustering of newspaper article contents and titles from MLSUM dataset.
Clustering of 10 sets on the newspaper article topics.", + reference="https://huggingface.co/datasets/mlsum", + dataset={ + "path": "reciTAL/mlsum", + "revision": "b5d54f8f3b61ae17845046286940f03c6bc79bc7", + "trust_remote_code": True, + }, + type="Clustering", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=_LANGUAGES, + main_score="v_measure", + date=("2010-01-01", "2018-09-30"), + form=["written"], + domains=["News"], + task_subtypes=["Topic classification"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@article{scialom2020mlsum, + title={MLSUM: The Multilingual Summarization Corpus}, + author={Scialom, Thomas and Dray, Paul-Alexis and Lamprier, Sylvain and Piwowarski, Benjamin and Staiano, Jacopo}, + journal={arXiv preprint arXiv:2004.14900}, + year={2020} + }""", + n_samples={"validation": 38561, "test": 41206}, + avg_character_length={"validation": 4613, "test": 4810}, + ) + + def load_data(self, **kwargs): + """Load dataset from HuggingFace hub and convert it to the standard format.""" + if self.data_loaded: + return + self.dataset = {} + for lang in self.hf_subsets: + self.dataset[lang] = datasets.load_dataset( + name=lang, + **self.metadata_dict["dataset"], + ) + self.dataset_transform(lang) + self.data_loaded = True + + def dataset_transform(self, lang): + """Convert to standard format""" + _dataset = self.dataset[lang] + _dataset.pop("train") + + _dataset = _dataset.remove_columns(["summary", "url", "date", "title"]) + + for eval_split in self.metadata.eval_splits: + texts = _dataset[eval_split]["text"] + topics = _dataset[eval_split]["topic"] + new_format = { + "sentences": [split.tolist() for split in np.array_split(texts, 10)], + "labels": [split.tolist() for split in np.array_split(topics, 10)], + } + _dataset[eval_split] = datasets.Dataset.from_dict(new_format) + + self.dataset[lang] = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MasakhaNEWSClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MasakhaNEWSClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..11f0b9a026aa2917b1c87aa1fc3f51a45808e47d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MasakhaNEWSClusteringP2P.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +import datasets +import numpy as np + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClustering, MultilingualTask + +_LANGUAGES = { + "amh": ["amh-Ethi"], + "eng": ["eng-Latn"], + "fra": ["fra-Latn"], + "hau": ["hau-Latn"], + "ibo": ["ibo-Latn"], + "lin": ["lin-Latn"], + "lug": ["lug-Latn"], + "orm": ["orm-Ethi"], + "pcm": ["pcm-Latn"], + "run": ["run-Latn"], + "sna": ["sna-Latn"], + "som": ["som-Latn"], + "swa": ["swa-Latn"], + "tir": ["tir-Ethi"], + "xho": ["xho-Latn"], + "yor": ["yor-Latn"], +} + + +class MasakhaNEWSClusteringP2P(AbsTaskClustering, MultilingualTask): + metadata = TaskMetadata( + name="MasakhaNEWSClusteringP2P", + description="Clustering of news article headlines and texts from MasakhaNEWS dataset.
Clustering of 10 sets on the news article label.", + reference="https://huggingface.co/datasets/masakhane/masakhanews", + dataset={ + "path": "masakhane/masakhanews", + "revision": "8ccc72e69e65f40c70e117d8b3c08306bb788b60", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=_LANGUAGES, + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + """Load dataset from HuggingFace hub and convert it to the standard format.""" + if self.data_loaded: + return + self.dataset = {} + for lang in self.hf_subsets: + self.dataset[lang] = datasets.load_dataset( + name=lang, + **self.metadata_dict["dataset"], + ) + self.dataset_transform(lang) + self.data_loaded = True + + def dataset_transform(self, lang): + """Convert to standard format""" + self.dataset[lang].pop("train") + self.dataset[lang].pop("validation") + + self.dataset[lang] = self.dataset[lang].remove_columns( + ["url", "text", "headline"] + ) + texts = self.dataset[lang]["test"]["headline_text"] + labels = self.dataset[lang]["test"]["label"] + new_format = { + "sentences": [split.tolist() for split in np.array_split(texts, 5)], + "labels": [split.tolist() for split in np.array_split(labels, 5)], + } + self.dataset[lang]["test"] = datasets.Dataset.from_dict(new_format) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MasakhaNEWSClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MasakhaNEWSClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..399ce73e9d82a11255d73cb0dc4b6c14267913bb --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/MasakhaNEWSClusteringS2S.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import datasets +import numpy as np + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskClustering, MultilingualTask + +_LANGUAGES = { + "amh": ["amh-Ethi"], + "eng": ["eng-Latn"], + "fra": ["fra-Latn"], + "hau": ["hau-Latn"], + "ibo": ["ibo-Latn"], + "lin": ["lin-Latn"], + "lug": ["lug-Latn"], + "orm": ["orm-Ethi"], + "pcm": ["pcm-Latn"], + "run": ["run-Latn"], + "sna": ["sna-Latn"], + "som": ["som-Latn"], + "swa": ["swa-Latn"], + "tir": ["tir-Ethi"], + "xho": ["xho-Latn"], + "yor": ["yor-Latn"], +} + + +class MasakhaNEWSClusteringS2S(AbsTaskClustering, MultilingualTask): + metadata = TaskMetadata( + name="MasakhaNEWSClusteringS2S", + dataset={ + "path": "masakhane/masakhanews", + "revision": "8ccc72e69e65f40c70e117d8b3c08306bb788b60", + }, + description=( + "Clustering of news article headlines from MasakhaNEWS dataset. Clustering of 10 sets on the news article label." 
+ ), + reference="https://huggingface.co/datasets/masakhane/masakhanews", + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=_LANGUAGES, + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + """Load dataset from HuggingFace hub and convert it to the standard format.""" + if self.data_loaded: + return + self.dataset = {} + for lang in self.hf_subsets: + self.dataset[lang] = datasets.load_dataset( + name=lang, + **self.metadata_dict["dataset"], + ) + self.dataset_transform(lang) + self.data_loaded = True + + def dataset_transform(self, lang): + """Convert to standard format""" + self.dataset[lang].pop("train") + self.dataset[lang].pop("validation") + + self.dataset[lang] = self.dataset[lang].remove_columns( + ["url", "text", "headline_text"] + ) + texts = self.dataset[lang]["test"]["headline"] + labels = self.dataset[lang]["test"]["label"] + new_format = { + "sentences": [split.tolist() for split in np.array_split(texts, 5)], + "labels": [split.tolist() for split in np.array_split(labels, 5)], + } + self.dataset[lang]["test"] = datasets.Dataset.from_dict(new_format) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/SIB200ClusteringS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/SIB200ClusteringS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..7b78f3bbdad92946c9d29912b4a24b98447f2f32 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/SIB200ClusteringS2S.py @@ -0,0 +1,262 @@ +from __future__ import annotations + +from datasets import Dataset, DatasetDict + +from mteb.abstasks import MultilingualTask +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +_LANGS = { + "ace_Latn": ["ace-Latn"], + "acm_Arab": ["acm-Arab"], + "acq_Arab": ["acq-Arab"], + "aeb_Arab": ["aeb-Arab"], + "afr_Latn": ["afr-Latn"], + "ajp_Arab": ["ajp-Arab"], + "aka_Latn": ["aka-Latn"], + "als_Latn": ["als-Latn"], + "amh_Ethi": ["amh-Ethi"], + "apc_Arab": ["apc-Arab"], + "arb_Latn": ["arb-Latn"], + "ars_Arab": ["ars-Arab"], + "ary_Arab": ["ary-Arab"], + "arz_Arab": ["arz-Arab"], + "asm_Beng": ["asm-Beng"], + "ast_Latn": ["ast-Latn"], + "awa_Deva": ["awa-Deva"], + "ayr_Latn": ["ayr-Latn"], + "azb_Arab": ["azb-Arab"], + "azj_Latn": ["azj-Latn"], + "bak_Cyrl": ["bak-Cyrl"], + "bam_Latn": ["bam-Latn"], + "ban_Latn": ["ban-Latn"], + "bel_Cyrl": ["bel-Cyrl"], + "bem_Latn": ["bem-Latn"], + "ben_Beng": ["ben-Beng"], + "bho_Deva": ["bho-Deva"], + "bjn_Latn": ["bjn-Latn"], + "bod_Tibt": ["bod-Tibt"], + "bos_Latn": ["bos-Latn"], + "bug_Latn": ["bug-Latn"], + "bul_Cyrl": ["bul-Cyrl"], + "cat_Latn": ["cat-Latn"], + "ceb_Latn": ["ceb-Latn"], + "ces_Latn": ["ces-Latn"], + "cjk_Latn": ["cjk-Latn"], + "ckb_Arab": ["ckb-Arab"], + "crh_Latn": ["crh-Latn"], + "cym_Latn": ["cym-Latn"], + "dan_Latn": ["dan-Latn"], + "deu_Latn": ["deu-Latn"], + "dik_Latn": ["dik-Latn"], + "dyu_Latn": ["dyu-Latn"], + "dzo_Tibt": ["dzo-Tibt"], + "ell_Grek": ["ell-Grek"], + "eng_Latn": ["eng-Latn"], + "epo_Latn": ["epo-Latn"], + "est_Latn": ["est-Latn"], + "eus_Latn": ["eus-Latn"], + "ewe_Latn": ["ewe-Latn"], + "fao_Latn": ["fao-Latn"], + "fij_Latn": ["fij-Latn"], + "fin_Latn": 
["fin-Latn"], + "fon_Latn": ["fon-Latn"], + "fra_Latn": ["fra-Latn"], + "fur_Latn": ["fur-Latn"], + "fuv_Latn": ["fuv-Latn"], + "gaz_Latn": ["gaz-Latn"], + "gla_Latn": ["gla-Latn"], + "gle_Latn": ["gle-Latn"], + "glg_Latn": ["glg-Latn"], + "grn_Latn": ["grn-Latn"], + "guj_Gujr": ["guj-Gujr"], + "hat_Latn": ["hat-Latn"], + "hau_Latn": ["hau-Latn"], + "heb_Hebr": ["heb-Hebr"], + "hin_Deva": ["hin-Deva"], + "hne_Deva": ["hne-Deva"], + "hrv_Latn": ["hrv-Latn"], + "hun_Latn": ["hun-Latn"], + "hye_Armn": ["hye-Armn"], + "ibo_Latn": ["ibo-Latn"], + "ilo_Latn": ["ilo-Latn"], + "ind_Latn": ["ind-Latn"], + "isl_Latn": ["isl-Latn"], + "ita_Latn": ["ita-Latn"], + "jav_Latn": ["jav-Latn"], + "jpn_Jpan": ["jpn-Jpan"], + "kab_Latn": ["kab-Latn"], + "kac_Latn": ["kac-Latn"], + "kam_Latn": ["kam-Latn"], + "kan_Knda": ["kan-Knda"], + "kas_Deva": ["kas-Deva"], + "kat_Geor": ["kat-Geor"], + "kaz_Cyrl": ["kaz-Cyrl"], + "kbp_Latn": ["kbp-Latn"], + "kea_Latn": ["kea-Latn"], + "khk_Cyrl": ["khk-Cyrl"], + "khm_Khmr": ["khm-Khmr"], + "kik_Latn": ["kik-Latn"], + "kin_Latn": ["kin-Latn"], + "kir_Cyrl": ["kir-Cyrl"], + "kmb_Latn": ["kmb-Latn"], + "kmr_Latn": ["kmr-Latn"], + "knc_Latn": ["knc-Latn"], + "kon_Latn": ["kon-Latn"], + "kor_Hang": ["kor-Hang"], + "lao_Laoo": ["lao-Laoo"], + "lij_Latn": ["lij-Latn"], + "lim_Latn": ["lim-Latn"], + "lin_Latn": ["lin-Latn"], + "lit_Latn": ["lit-Latn"], + "lmo_Latn": ["lmo-Latn"], + "ltg_Latn": ["ltg-Latn"], + "ltz_Latn": ["ltz-Latn"], + "lua_Latn": ["lua-Latn"], + "lug_Latn": ["lug-Latn"], + "luo_Latn": ["luo-Latn"], + "lus_Latn": ["lus-Latn"], + "lvs_Latn": ["lvs-Latn"], + "mag_Deva": ["mag-Deva"], + "mai_Deva": ["mai-Deva"], + "mal_Mlym": ["mal-Mlym"], + "mar_Deva": ["mar-Deva"], + "min_Latn": ["min-Latn"], + "mkd_Cyrl": ["mkd-Cyrl"], + "mlt_Latn": ["mlt-Latn"], + "mni_Beng": ["mni-Beng"], + "mos_Latn": ["mos-Latn"], + "mri_Latn": ["mri-Latn"], + "mya_Mymr": ["mya-Mymr"], + "nld_Latn": ["nld-Latn"], + "nno_Latn": ["nno-Latn"], + "nob_Latn": ["nob-Latn"], + "npi_Deva": ["npi-Deva"], + "nqo_Nkoo": ["nqo-Nkoo"], + "nso_Latn": ["nso-Latn"], + "nus_Latn": ["nus-Latn"], + "nya_Latn": ["nya-Latn"], + "oci_Latn": ["oci-Latn"], + "ory_Orya": ["ory-Orya"], + "pag_Latn": ["pag-Latn"], + "pan_Guru": ["pan-Guru"], + "pap_Latn": ["pap-Latn"], + "pbt_Arab": ["pbt-Arab"], + "pes_Arab": ["pes-Arab"], + "plt_Latn": ["plt-Latn"], + "pol_Latn": ["pol-Latn"], + "por_Latn": ["por-Latn"], + "prs_Arab": ["prs-Arab"], + "quy_Latn": ["quy-Latn"], + "ron_Latn": ["ron-Latn"], + "run_Latn": ["run-Latn"], + "rus_Cyrl": ["rus-Cyrl"], + "sag_Latn": ["sag-Latn"], + "san_Deva": ["san-Deva"], + "sat_Olck": ["sat-Olck"], + "scn_Latn": ["scn-Latn"], + "shn_Mymr": ["shn-Mymr"], + "sin_Sinh": ["sin-Sinh"], + "slk_Latn": ["slk-Latn"], + "slv_Latn": ["slv-Latn"], + "smo_Latn": ["smo-Latn"], + "sna_Latn": ["sna-Latn"], + "snd_Arab": ["snd-Arab"], + "som_Latn": ["som-Latn"], + "sot_Latn": ["sot-Latn"], + "spa_Latn": ["spa-Latn"], + "srd_Latn": ["srd-Latn"], + "srp_Cyrl": ["srp-Cyrl"], + "ssw_Latn": ["ssw-Latn"], + "sun_Latn": ["sun-Latn"], + "swe_Latn": ["swe-Latn"], + "swh_Latn": ["swh-Latn"], + "szl_Latn": ["szl-Latn"], + "tam_Taml": ["tam-Taml"], + "taq_Tfng": ["taq-Tfng"], + "tat_Cyrl": ["tat-Cyrl"], + "tel_Telu": ["tel-Telu"], + "tgk_Cyrl": ["tgk-Cyrl"], + "tgl_Latn": ["tgl-Latn"], + "tha_Thai": ["tha-Thai"], + "tir_Ethi": ["tir-Ethi"], + "tpi_Latn": ["tpi-Latn"], + "tsn_Latn": ["tsn-Latn"], + "tso_Latn": ["tso-Latn"], + "tuk_Latn": ["tuk-Latn"], + "tum_Latn": ["tum-Latn"], + "tur_Latn": ["tur-Latn"], + 
"twi_Latn": ["twi-Latn"], + "tzm_Tfng": ["tzm-Tfng"], + "uig_Arab": ["uig-Arab"], + "ukr_Cyrl": ["ukr-Cyrl"], + "umb_Latn": ["umb-Latn"], + "urd_Arab": ["urd-Arab"], + "uzn_Latn": ["uzn-Latn"], + "vec_Latn": ["vec-Latn"], + "vie_Latn": ["vie-Latn"], + "war_Latn": ["war-Latn"], + "wol_Latn": ["wol-Latn"], + "xho_Latn": ["xho-Latn"], + "ydd_Hebr": ["ydd-Hebr"], + "yor_Latn": ["yor-Latn"], + "yue_Hant": ["yue-Hant"], + "zho_Hant": ["zho-Hant"], + "zsm_Latn": ["zsm-Latn"], + "zul_Latn": ["zul-Latn"], +} + + +class SIB200ClusteringFast(MultilingualTask, AbsTaskClusteringFast): + metadata = TaskMetadata( + name="SIB200ClusteringFastS2S", + description="""SIB-200 is the largest publicly available topic classification + dataset based on Flores-200 covering 205 languages and dialects annotated. The dataset is + annotated in English for the topics, science/technology, travel, politics, sports, + health, entertainment, and geography. The labels are then transferred to the other languages + in Flores-200 which are machine-translated. + """, + reference="https://arxiv.org/abs/2309.07445", + dataset={ + "path": "mteb/sib200", + "revision": "a74d7350ea12af010cfb1c21e34f1f81fd2e615b", + }, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=_LANGS, + main_score="v_measure", + date=("2023-09-14", "2024-01-27"), + form=["written"], + domains=["News"], + task_subtypes=["Thematic clustering"], + license="cc-by-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", # expert annotated for English --> human translations + dialect=[], + text_creation="human-translated and localized", + bibtex_citation="""@article{adelani2023sib, + title={SIB-200: A simple, inclusive, and big evaluation dataset for topic classification in 200+ languages and dialects}, + author={Adelani, David Ifeoluwa and Liu, Hannah and Shen, Xiaoyu and Vassilyev, Nikita and Alabi, Jesujoba O and Mao, Yanke and Gao, Haonan and Lee, Annie En-Shiun}, + journal={arXiv preprint arXiv:2309.07445}, + year={2023} + }""", + n_samples={"test": 1004}, # combined train, validation, and test into test. 
+ avg_character_length={"test": 114.78}, + ) + + def dataset_transform(self): + ds = dict() + for lang in self.hf_subsets: + labels = [] + sentences = [] + ds[lang] = dict() + for split in ["train", "validation", "test"]: + labels.extend(self.dataset[lang][split]["category"]) + sentences.extend(self.dataset[lang][split]["text"]) + split_ds = Dataset.from_dict({"labels": labels, "sentences": sentences}) + lang_dict = {"test": split_ds} + ds[lang] = DatasetDict(lang_dict) + self.dataset = DatasetDict(ds) + for lang in self.hf_subsets: + self.dataset[lang] = self.dataset[lang].class_encode_column("labels") diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/WikiClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/WikiClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb44b15f1a23f79b5a13460e1cc20d46973b6b6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/WikiClusteringP2P.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +import itertools + +import numpy as np +from datasets import Dataset, DatasetDict + +from mteb.abstasks import AbsTaskClustering, MultilingualTask +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +_LANGUAGES = { + "bs": ["bos-Latn"], + "ca": ["cat-Latn"], + "cs": ["ces-Latn"], + "da": ["dan-Latn"], + "eu": ["eus-Latn"], + "gv": ["glv-Latn"], + "ilo": ["ilo-Latn"], + "ku": ["kur-Latn"], + "lv": ["lav-Latn"], + "min": ["min-Latn"], + "mt": ["mlt-Latn"], + "sco": ["sco-Latn"], + "sq": ["sqi-Latn"], + "wa": ["wln-Latn"], +} + + +class WikiClusteringP2P(AbsTaskClustering, MultilingualTask): + superseeded_by = "WikiClusteringFastP2P" + metadata = TaskMetadata( + name="WikiClusteringP2P", + description="Clustering of wikipedia articles inspired by BlubrbsClusteringP2P. Labels are taken from top-level categories of the respective languages (e.g., https://lv.wikipedia.org/wiki/Kategorija:Pamatkategorijas).", + reference="https://github.com/Rysias/wiki-clustering", + dataset={ + "path": "ryzzlestrizzle/multi-wiki-clustering-p2p", + "revision": "d4d92f8f28be71035be6a96bdfd4e200cf62faa8", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=_LANGUAGES, + main_score="v_measure", + date=("2001-01-15", "2024-04-15"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Thematic clustering"], + license="cc-by-sa-3.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation=None, # None exists + n_samples={"test": 71680}, + avg_character_length={"test": 625.3}, + ) + + +class WikiClusteringFastP2P(AbsTaskClusteringFast, MultilingualTask): + metadata = TaskMetadata( + name="WikiClusteringFastP2P", + description="Clustering of wikipedia articles inspired by BlubrbsClusteringP2P. 
Labels are taken from top-level categories of the respective languages (e.g., https://lv.wikipedia.org/wiki/Kategorija:Pamatkategorijas).", + reference="https://github.com/Rysias/wiki-clustering", + dataset={ + "path": "ryzzlestrizzle/multi-wiki-clustering-p2p", + "revision": "d4d92f8f28be71035be6a96bdfd4e200cf62faa8", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=_LANGUAGES, + main_score="v_measure", + date=("2001-01-15", "2024-04-15"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Thematic clustering"], + license="cc-by-sa-3.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="created", + bibtex_citation="", # None exists + n_samples={"test": 2048}, + avg_character_length={"test": 625.3}, + ) + + def dataset_transform(self): + ds = dict() + for lang in self.hf_subsets: + labels = [] + sentences = [] + ds[lang] = dict() + lang_dict = dict() + for split in self.metadata.eval_splits: + labels.extend( + itertools.chain.from_iterable(self.dataset[lang][split]["labels"]) + ) + sentences.extend( + itertools.chain.from_iterable( + self.dataset[lang][split]["sentences"] + ) + ) + + # Remove sentences and labels with only 1 label example. + unique_labels, counts = np.unique(labels, return_counts=True) + solo_label_idx = np.where(counts == 1) + solo_labels = unique_labels[solo_label_idx] + is_solo = np.isin(labels, solo_labels) + split_ds = Dataset.from_dict({"labels": labels, "sentences": sentences}) + if is_solo.any(): + split_ds = split_ds.select(np.nonzero(is_solo == False)[0]) # noqa: E712 + lang_dict.update({split: split_ds}) + ds[lang] = DatasetDict(lang_dict) + self.dataset = DatasetDict(ds) + for lang in self.hf_subsets: + self.dataset[lang] = self.stratified_subsampling( + self.dataset[lang], + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=2048, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/multilingual/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/SNLHierarchicalClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/SNLHierarchicalClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..f8d67a0a86976a926e10e2f02ff57809028baad0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/SNLHierarchicalClustering.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + + +def split_labels(record: dict) -> dict: + record["labels"] = record["labels"].split(",") + # First level is trivial + record["labels"] = record["labels"][1:] + return record + + +class SNLHierarchicalClusteringP2P(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="SNLHierarchicalClusteringP2P", + dataset={ + "path": "navjordj/SNL_summarization", + "revision": "3d3d27aa7af8941408cefc3991ada5d12a4273d1", + }, + description="Web-scraped articles from the Norwegian lexicon 'Det Store Norske Leksikon'.
Uses article categories as clusters.", + reference="https://huggingface.co/datasets/navjordj/SNL_summarization", + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["nob-Latn"], + main_score="v_measure", + date=("2020-01-01", "2024-12-31"), # best guess + form=["written"], + domains=["Encyclopaedic", "Non-fiction"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@mastersthesis{navjord2023beyond, + title={Beyond extractive: advancing abstractive automatic text summarization in Norwegian with transformers}, + author={Navjord, J{\o}rgen Johnsen and Korsvik, Jon-Mikkel Ryen}, + year={2023}, + school={Norwegian University of Life Sciences, {\AA}s} +}""", + n_samples={"test": 1300}, + avg_character_length={"test": 1986.9453846153847}, + ) + max_depth = 5 + + def dataset_transform(self) -> None: + self.dataset = self.dataset.rename_columns( + {"article": "sentences", "category": "labels"} + ) + self.dataset = self.dataset.map(split_labels) + + +class SNLHierarchicalClusteringS2S(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="SNLHierarchicalClusteringS2S", + dataset={ + "path": "navjordj/SNL_summarization", + "revision": "3d3d27aa7af8941408cefc3991ada5d12a4273d1", + }, + description="Web-scraped articles from the Norwegian lexicon 'Det Store Norske Leksikon'. Uses article categories as clusters.", + reference="https://huggingface.co/datasets/navjordj/SNL_summarization", + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["nob-Latn"], + main_score="v_measure", + date=("2020-01-01", "2024-12-31"), # best guess + form=["written"], + domains=["Encyclopaedic", "Non-fiction"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@mastersthesis{navjord2023beyond, + title={Beyond extractive: advancing abstractive automatic text summarization in Norwegian with transformers}, + author={Navjord, J{\o}rgen Johnsen and Korsvik, Jon-Mikkel Ryen}, + year={2023}, + school={Norwegian University of Life Sciences, {\AA}s} +}""", + n_samples={"test": 1300}, + avg_character_length={"test": 242.22384615384615}, + ) + max_depth = 5 + + def dataset_transform(self) -> None: + self.dataset = self.dataset.rename_columns( + {"ingress": "sentences", "category": "labels"} + ) + self.dataset = self.dataset.map(split_labels) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/VGHierarchicalClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/VGHierarchicalClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..6e34b026070d69b9cecd8f8fac58c2d1698eeb68 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/VGHierarchicalClustering.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +from mteb.abstasks import TaskMetadata +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast + +N_SAMPLES = 2048 + + +def split_labels(record: dict) -> dict: + record["labels"] = record["labels"].split(",")[:2] + return record + + +class VGHierarchicalClusteringP2P(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="VGHierarchicalClusteringP2P", + dataset={ + "path": "navjordj/VG_summarization", + "revision": "d4c5a8ba10ae71224752c727094ac4c46947fa29", + }, + description="Articles
and their classes (e.g. sports) from VG news articles extracted from Norsk Aviskorpus.", + reference="https://huggingface.co/datasets/navjordj/VG_summarization", + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["nob-Latn"], + main_score="v_measure", + date=("2020-01-01", "2024-12-31"), # best guess + form=["written"], + domains=["News", "Non-fiction"], + license="CC-BY-NC 4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@mastersthesis{navjord2023beyond, + title={Beyond extractive: advancing abstractive automatic text summarization in Norwegian with transformers}, + author={Navjord, J{\o}rgen Johnsen and Korsvik, Jon-Mikkel Ryen}, + year={2023}, + school={Norwegian University of Life Sciences, {\AA}s} +}""", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 2670.3243084794544}, + ) + + def dataset_transform(self) -> None: + self.dataset = self.dataset.rename_columns( + {"article": "sentences", "classes": "labels"} + ) + self.dataset = self.dataset.map(split_labels) + # Subsampling the dataset + self.dataset["test"] = self.dataset["test"].train_test_split( + test_size=N_SAMPLES, seed=self.seed + )["test"] + + +class VGHierarchicalClusteringS2S(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="VGHierarchicalClusteringS2S", + dataset={ + "path": "navjordj/VG_summarization", + "revision": "d4c5a8ba10ae71224752c727094ac4c46947fa29", + }, + description="Articles and their classes (e.g. sports) from VG news articles extracted from Norsk Aviskorpus.", + reference="https://huggingface.co/datasets/navjordj/VG_summarization", + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["nob-Latn"], + main_score="v_measure", + date=("2020-01-01", "2024-12-31"), # best guess + form=["written"], + domains=["News", "Non-fiction"], + license="CC-BY-NC 4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@mastersthesis{navjord2023beyond, + title={Beyond extractive: advancing abstractive automatic text summarization in Norwegian with transformers}, + author={Navjord, J{\o}rgen Johnsen and Korsvik, Jon-Mikkel Ryen}, + year={2023}, + school={Norwegian University of Life Sciences, {\AA}s} +}""", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 139.31247668283325}, + ) + + def dataset_transform(self) -> None: + self.dataset = self.dataset.rename_columns( + {"ingress": "sentences", "classes": "labels"} + ) + self.dataset = self.dataset.map(split_labels) + # Subsampling the dataset + self.dataset["test"] = self.dataset["test"].train_test_split( + test_size=N_SAMPLES, seed=self.seed + )["test"] diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/snl_clustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/snl_clustering.py new file mode 100644 index 0000000000000000000000000000000000000000..c8e469656aa83244423389c4c304160dce3735ec --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/snl_clustering.py @@ -0,0 +1,101 @@ +from __future__ import annotations +
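+# Superseded task (see superseeded_by below): dataset_transform shuffles the ingress +# and article texts with a fixed seed and keeps only 4 batches of 512 documents to +# keep the clustering task small.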
+import random +from itertools import islice +from typing import Iterable, TypeVar + +import datasets + +from mteb.abstasks import AbsTaskClustering, TaskMetadata + +T = TypeVar("T") + + +def batched(iterable: Iterable[T], n: int) -> Iterable[tuple[T, ...]]: + # batched('ABCDEFG', 3) --> ABC DEF G + if n < 1: + raise ValueError("n must be at least one") + it = iter(iterable) + while batch := tuple(islice(it, n)): + yield batch + + +class SNLClustering(AbsTaskClustering): + superseeded_by = "SNLHierarchicalClusteringP2P" + metadata = TaskMetadata( + name="SNLClustering", + dataset={ + "path": "navjordj/SNL_summarization", + "revision": "3d3d27aa7af8941408cefc3991ada5d12a4273d1", + }, + description="Web-scraped articles from the Norwegian lexicon 'Det Store Norske Leksikon'. Uses article categories as clusters.", + reference="https://huggingface.co/datasets/navjordj/SNL_summarization", + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["nob-Latn"], + main_score="v_measure", + date=("2020-01-01", "2024-12-31"), # best guess + form=["written"], + domains=["Encyclopaedic", "Non-fiction"], + license=None, + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@mastersthesis{navjord2023beyond, + title={Beyond extractive: advancing abstractive automatic text summarization in Norwegian with transformers}, + author={Navjord, J{\o}rgen Johnsen and Korsvik, Jon-Mikkel Ryen}, + year={2023}, + school={Norwegian University of Life Sciences, {\AA}s} +}""", + n_samples={"test": 2048}, + avg_character_length={"test": 1101.30}, + ) + + def dataset_transform(self): + splits = self.metadata_dict["eval_splits"] + + documents: list = [] + labels: list = [] + label_col = "category" + + ds = {} + for split in splits: + ds_split = self.dataset[split] + + _label = self.normalize_labels(ds_split[label_col]) + documents.extend(ds_split["ingress"]) + labels.extend(_label) + + documents.extend(ds_split["article"]) + labels.extend(_label) + + assert len(documents) == len(labels) + + rng = random.Random(42) # local only seed + pairs = list(zip(documents, labels)) + rng.shuffle(pairs) + documents, labels = [list(collection) for collection in zip(*pairs)] + + # reduce size of dataset to not have too large datasets in the clustering task + documents_batched = list(batched(documents, 512))[:4] + labels_batched = list(batched(labels, 512))[:4] + + ds[split] = datasets.Dataset.from_dict( + { + "sentences": documents_batched, + "labels": labels_batched, + } + ) + + self.dataset = datasets.DatasetDict(ds) + + @staticmethod + def normalize_labels(labels: list[str]) -> list[str]: + # example label: + # Store norske leksikon,Kunst og estetikk,Musikk,Klassisk musikk,Internasjonale dirigenter + # When using 2 levels there are 17 unique labels + # When using 3 levels there are 121 unique labels + return [",".join(tuple(label.split(",")[:3])) for label in labels] diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/vg_clustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/vg_clustering.py new file mode 100644 index 0000000000000000000000000000000000000000..e7792b77429fbb8dfadbef507e935732dc8415a4 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/nob/vg_clustering.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import random +from itertools import islice +from typing import Iterable, TypeVar + +import datasets + +from
mteb.abstasks import AbsTaskClustering, TaskMetadata + +T = TypeVar("T") + + +def batched(iterable: Iterable[T], n: int) -> Iterable[tuple[T, ...]]: + # batched('ABCDEFG', 3) --> ABC DEF G + if n < 1: + raise ValueError("n must be at least one") + it = iter(iterable) + while batch := tuple(islice(it, n)): + yield batch + + +class VGClustering(AbsTaskClustering): + superseeded_by = "VGHierarchicalClusteringP2P" + metadata = TaskMetadata( + name="VGClustering", + dataset={ + "path": "navjordj/VG_summarization", + "revision": "d4c5a8ba10ae71224752c727094ac4c46947fa29", + }, + description="Articles and their classes (e.g. sports) from VG news articles extracted from Norsk Aviskorpus.", + reference="https://huggingface.co/datasets/navjordj/VG_summarization", + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["nob-Latn"], + main_score="v_measure", + date=("2020-01-01", "2024-12-31"), # best guess + form=["written"], + domains=["News", "Non-fiction"], + license=None, + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@mastersthesis{navjord2023beyond, + title={Beyond extractive: advancing abstractive automatic text summarization in Norwegian with transformers}, + author={Navjord, J{\o}rgen Johnsen and Korsvik, Jon-Mikkel Ryen}, + year={2023}, + school={Norwegian University of Life Sciences, {\AA}s} +}""", + n_samples={"test": 2048}, + avg_character_length={"test": 1009.65}, + ) + + def dataset_transform(self): + splits = self.metadata_dict["eval_splits"] + + documents: list = [] + labels: list = [] + label_col = "classes" + + ds = {} + for split in splits: + ds_split = self.dataset[split] + + _label = self.normalize_labels(ds_split[label_col]) + documents.extend(ds_split["title"]) + labels.extend(_label) + + documents.extend(ds_split["ingress"]) + labels.extend(_label) + + documents.extend(ds_split["article"]) + labels.extend(_label) + + assert len(documents) == len(labels) + + rng = random.Random(1111) # local only seed + # resampling changes scores from 12.68, 11.30, 12.65 (sample model) + pairs = list(zip(documents, labels)) + rng.shuffle(pairs) + documents, labels = [list(collection) for collection in zip(*pairs)] + + # reduce size of dataset to not have too large datasets in the clustering task + documents_batched = list(batched(documents, 512))[:4] + labels_batched = list(batched(labels, 512))[:4] + # See: + # https://github.com/KennethEnevoldsen/scandinavian-embedding-benchmark/pull/96 + # for a discussion on sizes + + ds[split] = datasets.Dataset.from_dict( + { + "sentences": documents_batched, + "labels": labels_batched, + } + ) + + self.dataset = datasets.DatasetDict(ds) + + @staticmethod + def normalize_labels(labels: list[str]) -> list[str]: + # Agreed on and debated in: https://github.com/KennethEnevoldsen/scandinavian-embedding-benchmark/issues/83 + return [label.split(",")[0] for label in labels] diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/pol/PolishClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/pol/PolishClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..3c6ff6187cf27705208e97ba43fc7aaeac079cc8 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/pol/PolishClustering.py @@ -0,0 +1,314 @@ +from __future__ import annotations + +from itertools import chain + +import numpy as np +from datasets import Dataset, DatasetDict + +from 
mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 2048 + + +class EightTagsClustering(AbsTaskClustering): + superseeded_by = "EightTagsClustering.v2" + metadata = TaskMetadata( + name="EightTagsClustering", + description="Clustering of headlines from social media posts in Polish belonging to 8 categories: film, history, " + "food, medicine, motorization, work, sport and technology.", + reference="https://aclanthology.org/2020.lrec-1.207.pdf", + dataset={ + "path": "PL-MTEB/8tags-clustering", + "revision": "78b962b130c6690659c65abf67bf1c2f030606b6", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="v_measure", + date=("2019-01-01", "2020-05-01"), + form=["written"], + domains=["Social"], + task_subtypes=["Topic classification", "Thematic clustering"], + license="GPL-3.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{dadas-etal-2020-evaluation, + title = "Evaluation of Sentence Representations in {P}olish", + author = "Dadas, Slawomir and + Pere{\l}kiewicz, Micha{\l} and + Po{\'s}wiata, Rafa{\l}", + editor = "Calzolari, Nicoletta and + B{\'e}chet, Fr{\'e}d{\'e}ric and + Blache, Philippe and + Choukri, Khalid and + Cieri, Christopher and + Declerck, Thierry and + Goggi, Sara and + Isahara, Hitoshi and + Maegaard, Bente and + Mariani, Joseph and + Mazo, H{\'e}l{\`e}ne and + Moreno, Asuncion and + Odijk, Jan and + Piperidis, Stelios", + booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference", + month = may, + year = "2020", + address = "Marseille, France", + publisher = "European Language Resources Association", + url = "https://aclanthology.org/2020.lrec-1.207", + pages = "1674--1680", + abstract = "Methods for learning sentence representations have been actively developed in recent years. However, the lack of pre-trained models and datasets annotated at the sentence level has been a problem for low-resource languages such as Polish which led to less interest in applying these methods to language-specific tasks. In this study, we introduce two new Polish datasets for evaluating sentence embeddings and provide a comprehensive evaluation of eight sentence representation methods including Polish and multilingual models. We consider classic word embedding models, recently developed contextual embeddings and multilingual sentence encoders, showing strengths and weaknesses of specific approaches. 
We also examine different methods of aggregating word vectors into a single sentence vector.", + language = "English", + ISBN = "979-10-95546-34-4", + }""", + n_samples={"test": 49373}, + avg_character_length={"test": 78.23}, + ) + + +class EightTagsClusteringFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="EightTagsClustering.v2", + description="Clustering of headlines from social media posts in Polish belonging to 8 categories: film, history, " + "food, medicine, motorization, work, sport and technology.", + reference="https://aclanthology.org/2020.lrec-1.207.pdf", + dataset={ + "path": "PL-MTEB/8tags-clustering", + "revision": "78b962b130c6690659c65abf67bf1c2f030606b6", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="v_measure", + date=("2019-01-01", "2020-05-01"), + form=["written"], + domains=["Social"], + task_subtypes=["Topic classification", "Thematic clustering"], + license="GPL-3.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{dadas-etal-2020-evaluation, + title = "Evaluation of Sentence Representations in {P}olish", + author = "Dadas, Slawomir and + Pere{\l}kiewicz, Micha{\l} and + Po{\'s}wiata, Rafa{\l}", + editor = "Calzolari, Nicoletta and + B{\'e}chet, Fr{\'e}d{\'e}ric and + Blache, Philippe and + Choukri, Khalid and + Cieri, Christopher and + Declerck, Thierry and + Goggi, Sara and + Isahara, Hitoshi and + Maegaard, Bente and + Mariani, Joseph and + Mazo, H{\'e}l{\`e}ne and + Moreno, Asuncion and + Odijk, Jan and + Piperidis, Stelios", + booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference", + month = may, + year = "2020", + address = "Marseille, France", + publisher = "European Language Resources Association", + url = "https://aclanthology.org/2020.lrec-1.207", + pages = "1674--1680", + abstract = "Methods for learning sentence representations have been actively developed in recent years. However, the lack of pre-trained models and datasets annotated at the sentence level has been a problem for low-resource languages such as Polish which led to less interest in applying these methods to language-specific tasks. In this study, we introduce two new Polish datasets for evaluating sentence embeddings and provide a comprehensive evaluation of eight sentence representation methods including Polish and multilingual models. We consider classic word embedding models, recently developed contextual embeddings and multilingual sentence encoders, showing strengths and weaknesses of specific approaches. 
We also examine different methods of aggregating word vectors into a single sentence vector.", + language = "English", + ISBN = "979-10-95546-34-4", + }""", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 78.73}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(chain.from_iterable(self.dataset[split]["labels"])) + sentences = list(chain.from_iterable(self.dataset[split]["sentences"])) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=N_SAMPLES, + ) + + +class PlscClusteringS2S(AbsTaskClusteringFast): + superseeded_by = "PlscClusteringS2S.v2" + metadata = TaskMetadata( + name="PlscClusteringS2S", + description="Clustering of Polish article titles from Library of Science (https://bibliotekanauki.pl/), either " + "on the scientific field or discipline.", + reference="https://huggingface.co/datasets/rafalposwiata/plsc", + dataset={ + "path": "PL-MTEB/plsc-clustering-s2s", + "revision": "39bcadbac6b1eddad7c1a0a176119ce58060289a", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="v_measure", + date=("2022-04-04", "2023-09-12"), + form=["written"], + domains=["Academic"], + task_subtypes=["Topic classification", "Thematic clustering"], + license="cc0-1.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": 17534}, + avg_character_length={"test": 84.34}, + ) + + +class PlscClusteringS2SFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="PlscClusteringS2S.v2", + description="Clustering of Polish article titles from Library of Science (https://bibliotekanauki.pl/), either " + "on the scientific field or discipline.", + reference="https://huggingface.co/datasets/rafalposwiata/plsc", + dataset={ + "path": "PL-MTEB/plsc-clustering-s2s", + "revision": "39bcadbac6b1eddad7c1a0a176119ce58060289a", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="v_measure", + date=("2022-04-04", "2023-09-12"), + form=["written"], + domains=["Academic"], + task_subtypes=["Topic classification", "Thematic clustering"], + license="cc0-1.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 84.34}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = self.dataset[split]["labels"] + sentences = self.dataset[split]["sentences"] + # Remove sentences and labels with only 1 label example. 
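+ # The stratified subsampling below requires every label to occur at least twice, + # so rows whose label appears only once are identified and dropped first.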
+ unique_labels, counts = np.unique(labels, return_counts=True) + solo_label_idx = np.where(counts == 1) + solo_labels = unique_labels[solo_label_idx] + is_solo = np.isin(labels, solo_labels) + split_ds = Dataset.from_dict({"labels": labels, "sentences": sentences}) + if is_solo.any(): + split_ds = split_ds.select(np.nonzero(is_solo == False)[0]) # noqa: E712 + ds[split] = split_ds + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=N_SAMPLES, + ) + + +class PlscClusteringP2P(AbsTaskClusteringFast): + superseeded_by = "PlscClusteringP2P.v2" + metadata = TaskMetadata( + name="PlscClusteringP2P", + description="Clustering of Polish article titles+abstracts from Library of Science " + "(https://bibliotekanauki.pl/), either on the scientific field or discipline.", + reference="https://huggingface.co/datasets/rafalposwiata/plsc", + dataset={ + "path": "PL-MTEB/plsc-clustering-p2p", + "revision": "8436dd4c05222778013d6642ee2f3fa1722bca9b", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="v_measure", + date=("2022-04-04", "2023-09-12"), + form=["written"], + domains=["Academic"], + task_subtypes=["Topic classification", "Thematic clustering"], + license="cc0-1.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": 17537}, + avg_character_length={"test": 1023.21}, + ) + + +class PlscClusteringP2PFast(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="PlscClusteringP2P.v2", + description="Clustering of Polish article titles+abstracts from Library of Science " + "(https://bibliotekanauki.pl/), either on the scientific field or discipline.", + reference="https://huggingface.co/datasets/rafalposwiata/plsc", + dataset={ + "path": "PL-MTEB/plsc-clustering-p2p", + "revision": "8436dd4c05222778013d6642ee2f3fa1722bca9b", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="v_measure", + date=("2022-04-04", "2023-09-12"), + form=["written"], + domains=["Academic"], + task_subtypes=["Topic classification", "Thematic clustering"], + license="cc0-1.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 1023.21}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = self.dataset[split]["labels"] + sentences = self.dataset[split]["sentences"] + # Remove sentences and labels with only 1 label example.
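+ # np.unique with return_counts=True yields each label with its frequency; + # rows whose label occurs exactly once are masked via np.isin and removed below.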
+ unique_labels, counts = np.unique(labels, return_counts=True) + solo_label_idx = np.where(counts == 1) + solo_labels = unique_labels[solo_label_idx] + is_solo = np.isin(labels, solo_labels) + split_ds = Dataset.from_dict({"labels": labels, "sentences": sentences}) + if is_solo.any(): + split_ds = split_ds.select(np.nonzero(is_solo == False)[0]) # noqa: E712 + ds[split] = split_ds + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=N_SAMPLES, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/pol/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/pol/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/rom/RomaniBibleClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/rom/RomaniBibleClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..560877fc6085ee1eb96c295be7efbe47508a2cb1 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/rom/RomaniBibleClustering.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskClustering, TaskMetadata + + +class RomaniBibleClustering(AbsTaskClustering): + metadata = TaskMetadata( + name="RomaniBibleClustering", + description="Clustering verses from the Bible in Kalderash Romani by book.", + reference="https://romani.global.bible/info", + dataset={ + "path": "kardosdrur/romani-bible", + "revision": "97fae0e80a8d275bc685dcb3da08972af542ad6e", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["rom-Latn"], + main_score="v_measure", + date=("2020-01-01", "2020-12-31"), + form=["written"], + domains=["Religious"], + task_subtypes=["Thematic clustering"], + license="MIT", + socioeconomic_status="low", + annotations_creators="derived", + dialect=["Kalderash"], + text_creation="human-translated and localized", + bibtex_citation=None, + n_samples={"test": 2048}, + avg_character_length={"test": 132.2}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/spa/SpanishNewsClusteringP2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/spa/SpanishNewsClusteringP2P.py new file mode 100644 index 0000000000000000000000000000000000000000..77416461280b5a5ea612c3acd34417c49ba34b5d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/spa/SpanishNewsClusteringP2P.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskClustering import AbsTaskClustering + + +class SpanishNewsClusteringP2P(AbsTaskClustering): + metadata = TaskMetadata( + name="SpanishNewsClusteringP2P", + description="Clustering of news articles, 7 topics in total.", + reference="https://www.kaggle.com/datasets/kevinmorgado/spanish-news-classification", + dataset={ + "path": "jinaai/spanish_news_clustering", + "revision": "bf8ca8ddc5b7da4f7004720ddf99bbe0483480e6", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["spa-Latn"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git 
a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/spa/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/spa/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/swe/SwednClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/swe/SwednClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..c977dac45728d65259070c147ffcfb65bf4a9d7a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/swe/SwednClustering.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks import TaskMetadata +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast + + +def dataset_transform(self): + """The article_category clusters differ between the splits (with the test set only having 1 cluster). Therefore we combine it all into one + cluster. + """ + splits = ["train", "validation"] + # performance of sample models with test set: 8.74, 2.43 -removing test-> 11.26, 4.27 + # this is due to the test set only having 1 cluster which is "other" + + label_col = "article_category" + + labels_headlines = [] + labels_summaries = [] + labels_articles = [] + docs_headlines = [] + docs_summaries = [] + docs_articles = [] + + for split in splits: + ds_split = self.dataset[split] + + docs_headlines.extend(ds_split["headline"]) + labels_headlines.extend(ds_split[label_col]) + + docs_summaries.extend(ds_split["summary"]) + labels_summaries.extend(ds_split[label_col]) + + docs_articles.extend(ds_split["article"]) + labels_articles.extend(ds_split[label_col]) + + ds_headlines = datasets.Dataset.from_dict( + {"sentences": docs_headlines, "labels": labels_headlines} + ) + ds_summaries = datasets.Dataset.from_dict( + {"sentences": docs_summaries, "labels": labels_summaries} + ) + ds_articles = datasets.Dataset.from_dict( + {"sentences": docs_articles, "labels": labels_articles} + ) + + self.dataset = datasets.DatasetDict( + { + "headlines": ds_headlines, + "summaries": ds_summaries, + "articles": ds_articles, + } + ) + + +class SwednClusteringP2P(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="SwednClusteringP2P", + dataset={ + "path": "sbx/superlim-2", + "revision": "ef1661775d746e0844b299164773db733bdc0bf6", + "name": "swedn", + }, + description="The SWE-DN corpus is based on 1,963,576 news articles from the Swedish newspaper Dagens Nyheter (DN) during the years 2000--2020. The articles are filtered to resemble the CNN/DailyMail dataset both regarding textual structure. 
This dataset uses the category labels as clusters.", + reference="https://spraakbanken.gu.se/en/resources/swedn", + type="Clustering", + category="p2p", + eval_splits=["summaries", "articles"], + eval_langs=["swe-Latn"], + main_score="v_measure", + date=("2000-01-01", "2020-12-31"), # best guess + form=["written"], + domains=["News", "Non-fiction"], + license=None, + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@inproceedings{monsen2021method, + title={A method for building non-english corpora for abstractive text summarization}, + author={Monsen, Julius and J{\"o}nsson, Arne}, + booktitle={Proceedings of CLARIN Annual Conference}, + year={2021} +}""", + n_samples={"all": 2048}, + avg_character_length={"all": 1619.71}, + ) + + def dataset_transform(self): + dataset_transform(self) + + +class SwednClusteringFastS2S(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="SwednClusteringS2S", + dataset={ + "path": "sbx/superlim-2", + "revision": "ef1661775d746e0844b299164773db733bdc0bf6", + "name": "swedn", + }, + description="The SWE-DN corpus is based on 1,963,576 news articles from the Swedish newspaper Dagens Nyheter (DN) during the years 2000--2020. The articles are filtered to resemble the CNN/DailyMail dataset both regarding textual structure. This dataset uses the category labels as clusters.", + reference="https://spraakbanken.gu.se/en/resources/swedn", + type="Clustering", + category="s2s", + eval_splits=["headlines"], + eval_langs=["swe-Latn"], + main_score="v_measure", + date=("2000-01-01", "2020-12-31"), # best guess + form=["written"], + domains=["News", "Non-fiction"], + license=None, + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@inproceedings{monsen2021method, + title={A method for building non-english corpora for abstractive text summarization}, + author={Monsen, Julius and J{\"o}nsson, Arne}, + booktitle={Proceedings of CLARIN Annual Conference}, + year={2021} +}""", + n_samples={"all": 2048}, + avg_character_length={"all": 1619.71}, + ) + + def dataset_transform(self): + dataset_transform(self) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/swe/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/swe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/swe/swedn_clustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/swe/swedn_clustering.py new file mode 100644 index 0000000000000000000000000000000000000000..f31120c0dd9be3eb2c9e6f83ecb49c7e8616c7c3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/swe/swedn_clustering.py @@ -0,0 +1,108 @@ +from __future__ import annotations + +import random +from collections.abc import Iterable +from itertools import islice +from typing import TypeVar + +import datasets + +from mteb.abstasks import AbsTaskClustering, TaskMetadata + +T = TypeVar("T") + + +def batched(iterable: Iterable[T], n: int) -> Iterable[tuple[T, ...]]: + # batched('ABCDEFG', 3) --> ABC DEF G + if n < 1: + raise ValueError("n must be at least one") + it = iter(iterable) + while batch := tuple(islice(it, n)): + yield batch + + +class SwednClustering(AbsTaskClustering): + superseeded_by = "SwednClusteringP2P" + 
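+ # "superseeded_by" (the spelling used throughout this codebase) marks a + # task as deprecated in favor of a newer variant; here it points to the + # SwednClusteringP2P task defined in SwednClustering.py above.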
+ metadata = TaskMetadata( + name="SwednClustering", + dataset={ + "path": "sbx/superlim-2", + "revision": "ef1661775d746e0844b299164773db733bdc0bf6", + "name": "swedn", + }, + description="The SWE-DN corpus is based on 1,963,576 news articles from the Swedish newspaper Dagens Nyheter (DN) during the years 2000--2020. The articles are filtered to resemble the CNN/DailyMail dataset both regarding textual structure. This dataset uses the category labels as clusters.", + reference="https://spraakbanken.gu.se/en/resources/swedn", + type="Clustering", + category="p2p", + eval_splits=["all"], + eval_langs=["swe-Latn"], + main_score="v_measure", + date=("2000-01-01", "2020-12-31"), # best guess + form=["written"], + domains=["News", "Non-fiction"], + license=None, + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + task_subtypes=["Thematic clustering"], + text_creation="found", + bibtex_citation="""@inproceedings{monsen2021method, + title={A method for building non-english corpora for abstractive text summarization}, + author={Monsen, Julius and J{\"o}nsson, Arne}, + booktitle={Proceedings of CLARIN Annual Conference}, + year={2021} +}""", + n_samples={"all": 2048}, + avg_character_length={"all": 1619.71}, + ) + + def dataset_transform(self): + """The article_category clusters differ between the splits (with the test set only having 1 cluster). Therefore we combine it all into one + cluster. + """ + splits = ["train", "validation"] + # performance of sample models with test set: 8.74, 2.43 -removing test-> 11.26, 4.27 + # this is due to the test set only having 1 cluster which is "other" + + headlines = [] + summaries = [] + articles = [] + headline_labels = [] + summary_labels = [] + article_labels = [] + label_col = "article_category" + + for split in splits: + ds_split = self.dataset[split] + headlines.extend(ds_split["headline"]) + headline_labels.extend(ds_split[label_col]) + + summaries.extend(ds_split["summary"]) + summary_labels.extend(ds_split[label_col]) + + articles.extend(ds_split["article"]) + article_labels.extend(ds_split[label_col]) + + rng = random.Random(42) # local only seed + + clusters_text = [] + clusters_labels = [] + doc_types = [(summaries, summary_labels), (articles, article_labels)] + # Note that headlines are excluded: + # Scores of sample models with headlines: 11.26, 4.27 -removing headlines-> 16.43, 4.31 + # as headlines are so short that it is hard to meaningfully cluster them even for humans.
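+ # Sketch of the sampling below, using the batched() helper defined above + # (toy values): list(batched("ABCDE", 2)) -> [("A", "B"), ("C", "D"), ("E",)]. + # Keeping only pairs_batched[0] and pairs_batched[1] yields two clusters of + # 512 documents per doc_type, i.e. 2 doc_types * 2 batches * 512 = 2048 + # samples, matching n_samples={"all": 2048} in the metadata.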
+ for text, labels in doc_types: + pairs = list(zip(text, labels)) + rng.shuffle(pairs) + # reduce the dataset size so the clustering task does not become too large + pairs_batched = list(batched(pairs, 512)) + texts1, labels1 = list(zip(*pairs_batched[0])) + texts2, labels2 = list(zip(*pairs_batched[1])) + + clusters_text.extend([texts1, texts2]) + clusters_labels.extend([labels1, labels2]) + ds = datasets.Dataset.from_dict( + {"sentences": clusters_text, "labels": clusters_labels} + ) + self.dataset = datasets.DatasetDict({"all": ds}) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/zho/CMTEBClustering.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/zho/CMTEBClustering.py new file mode 100644 index 0000000000000000000000000000000000000000..fd9088188b13bd4c0132cf591585c75e9c0fc50f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/zho/CMTEBClustering.py @@ -0,0 +1,364 @@ +from __future__ import annotations + +import itertools + +from datasets import Dataset, DatasetDict + +from mteb.abstasks.AbsTaskClustering import AbsTaskClustering +from mteb.abstasks.AbsTaskClusteringFast import AbsTaskClusteringFast +from mteb.abstasks.TaskMetadata import TaskMetadata + +NUM_SAMPLES = 2048 + + +class CLSClusteringFastS2S(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="CLSClusteringS2S.v2", + description="Clustering of titles from CLS dataset. Clustering of 13 sets on the main category.", + reference="https://arxiv.org/abs/2209.05034", + dataset={ + "path": "C-MTEB/CLSClusteringS2S", + "revision": "e458b3f5414b62b7f9f83499ac1f5497ae2e869f", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="v_measure", + date=("2022-01-01", "2022-09-12"), + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering", "Topic classification"], + license="Apache-2.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{li2022csl, + title={CSL: A Large-scale Chinese Scientific Literature Dataset}, + author={Yudong Li and Yuqing Zhang and Zhe Zhao and Linlin Shen and Weijie Liu and Weiquan Mao and Hui Zhang}, + year={2022}, + eprint={2209.05034}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }""", + n_samples={"test": NUM_SAMPLES}, + avg_character_length={}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=NUM_SAMPLES, + ) + + +class CLSClusteringFastP2P(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="CLSClusteringP2P.v2", + description="Clustering of titles + abstract from CLS dataset. 
Clustering of 13 sets on the main category.", + reference="https://arxiv.org/abs/2209.05034", + dataset={ + "path": "C-MTEB/CLSClusteringP2P", + "revision": "4b6227591c6c1a73bc76b1055f3b7f3588e72476", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="v_measure", + date=("2022-01-01", "2022-09-12"), + form=["written"], + domains=["Academic"], + task_subtypes=["Thematic clustering", "Topic classification"], + license="Apache-2.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{li2022csl, + title={CSL: A Large-scale Chinese Scientific Literature Dataset}, + author={Yudong Li and Yuqing Zhang and Zhe Zhao and Linlin Shen and Weijie Liu and Weiquan Mao and Hui Zhang}, + year={2022}, + eprint={2209.05034}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + }""", + n_samples={"test": NUM_SAMPLES}, + avg_character_length={}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=NUM_SAMPLES, + ) + + +class CLSClusteringS2S(AbsTaskClustering): + superseeded_by = "CLSClusteringS2S.v2" + metadata = TaskMetadata( + name="CLSClusteringS2S", + description="Clustering of titles from CLS dataset. Clustering of 13 sets on the main category.", + reference="https://arxiv.org/abs/2209.05034", + dataset={ + "path": "C-MTEB/CLSClusteringS2S", + "revision": "e458b3f5414b62b7f9f83499ac1f5497ae2e869f", + }, + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=""" +@article{li2022csl, + title={CSL: A large-scale Chinese scientific literature dataset}, + author={Li, Yudong and Zhang, Yuqing and Zhao, Zhe and Shen, Linlin and Liu, Weijie and Mao, Weiquan and Zhang, Hui}, + journal={arXiv preprint arXiv:2209.05034}, + year={2022} +} +""", + n_samples={"test": 100000}, + avg_character_length=None, + ) + + +class CLSClusteringP2P(AbsTaskClustering): + superseeded_by = "CLSClusteringP2P.v2" + metadata = TaskMetadata( + name="CLSClusteringP2P", + description="Clustering of titles + abstract from CLS dataset. 
Clustering of 13 sets on the main category.", + reference="https://arxiv.org/abs/2209.05034", + dataset={ + "path": "C-MTEB/CLSClusteringP2P", + "revision": "4b6227591c6c1a73bc76b1055f3b7f3588e72476", + }, + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 100000}, + avg_character_length=None, + ) + + +class ThuNewsClusteringFastS2S(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="ThuNewsClusteringS2S.v2", + dataset={ + "path": "C-MTEB/ThuNewsClusteringS2S", + "revision": "8a8b2caeda43f39e13c4bc5bea0f8a667896e10d", + }, + description="Clustering of titles from the THUCNews dataset", + reference="http://thuctc.thunlp.org/", + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="v_measure", + date=("2006-01-01", "2007-01-01"), + form=["written"], + domains=["News"], + task_subtypes=["Thematic clustering", "Topic classification"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": NUM_SAMPLES}, + avg_character_length={}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=NUM_SAMPLES, + ) + + +class ThuNewsClusteringFastP2P(AbsTaskClusteringFast): + metadata = TaskMetadata( + name="ThuNewsClusteringP2P.v2", + dataset={ + "path": "C-MTEB/ThuNewsClusteringP2P", + "revision": "5798586b105c0434e4f0fe5e767abe619442cf93", + }, + description="Clustering of titles + abstracts from the THUCNews dataset", + reference="http://thuctc.thunlp.org/", + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="v_measure", + date=("2006-01-01", "2007-01-01"), + form=["written"], + domains=["News"], + task_subtypes=["Thematic clustering", "Topic classification"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"test": NUM_SAMPLES}, + avg_character_length={}, + ) + + def dataset_transform(self): + ds = dict() + for split in self.metadata.eval_splits: + labels = list(itertools.chain.from_iterable(self.dataset[split]["labels"])) + sentences = list( + itertools.chain.from_iterable(self.dataset[split]["sentences"]) + ) + ds[split] = Dataset.from_dict({"labels": labels, "sentences": sentences}) + self.dataset = DatasetDict(ds) + self.dataset = self.stratified_subsampling( + self.dataset, + self.seed, + self.metadata.eval_splits, + label="labels", + n_samples=NUM_SAMPLES, + ) + + +class ThuNewsClusteringS2S(AbsTaskClustering): + superseeded_by = "ThuNewsClusteringS2S.v2" + metadata = TaskMetadata( + name="ThuNewsClusteringS2S", + dataset={ + "path": "C-MTEB/ThuNewsClusteringS2S", + "revision": "8a8b2caeda43f39e13c4bc5bea0f8a667896e10d", + }, + 
description="Clustering of titles from the THUCNews dataset", + reference="http://thuctc.thunlp.org/", + type="Clustering", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=""" +@inproceedings{eisner2007proceedings, + title={Proceedings of the 2007 joint conference on empirical methods in natural language processing and computational natural language learning (EMNLP-CoNLL)}, + author={Eisner, Jason}, + booktitle={Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)}, + year={2007} +} +@inproceedings{li2006comparison, + title={A comparison and semi-quantitative analysis of words and character-bigrams as features in chinese text categorization}, + author={Li, Jingyang and Sun, Maosong and Zhang, Xian}, + booktitle={proceedings of the 21st international conference on computational linguistics and 44th annual meeting of the association for computational linguistics}, + pages={545--552}, + year={2006} +} +""", + n_samples={"test": 100000}, + avg_character_length=None, + ) + + +class ThuNewsClusteringP2P(AbsTaskClustering): + superseeded_by = "ThuNewsClusteringP2P.v2" + metadata = TaskMetadata( + name="ThuNewsClusteringP2P", + dataset={ + "path": "C-MTEB/ThuNewsClusteringP2P", + "revision": "5798586b105c0434e4f0fe5e767abe619442cf93", + }, + description="Clustering of titles + abstracts from the THUCNews dataset", + reference="http://thuctc.thunlp.org/", + type="Clustering", + category="p2p", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="v_measure", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=""" +@inproceedings{eisner2007proceedings, + title={Proceedings of the 2007 joint conference on empirical methods in natural language processing and computational natural language learning (EMNLP-CoNLL)}, + author={Eisner, Jason}, + booktitle={Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)}, + year={2007} +} +@inproceedings{li2006comparison, + title={A comparison and semi-quantitative analysis of words and character-bigrams as features in chinese text categorization}, + author={Li, Jingyang and Sun, Maosong and Zhang, Xian}, + booktitle={proceedings of the 21st international conference on computational linguistics and 44th annual meeting of the association for computational linguistics}, + pages={545--552}, + year={2006} +} +""", + n_samples={"test": 100000}, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/zho/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Clustering/zho/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0329080144874f0526ae6210065ae93c6f1137b --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/__init__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from .eng.Core17InstructionRetrieval import * +from .eng.News21InstructionRetrieval import * +from .eng.Robust04InstructionRetrieval import * diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/Core17InstructionRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/Core17InstructionRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..b03e7badc1d4f6da97e9ebd1dc864622740c1108 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/Core17InstructionRetrieval.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskInstructionRetrieval import AbsTaskInstructionRetrieval + + +class Core17InstructionRetrieval(AbsTaskInstructionRetrieval): + metadata = TaskMetadata( + name="Core17InstructionRetrieval", + description="Measuring retrieval instruction following ability on Core17 narratives.", + reference="https://arxiv.org/abs/2403.15246", + dataset={ + "path": "jhu-clsp/core17-instructions", + "revision": "e39ff896cf3efbbdeeb950e6bd7c79f266995b07", + }, + type="InstructionRetrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="p-MRR", + date=("2023-08-01", "2024-04-01"), + form=["written"], + domains=["News"], + task_subtypes=[], + license="MIT", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{weller2024followir, + title={FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions}, + author={Orion Weller and Benjamin Chang and Sean MacAvaney and Kyle Lo and Arman Cohan and Benjamin Van Durme and Dawn Lawrie and Luca Soldaini}, + year={2024}, + eprint={2403.15246}, + archivePrefix={arXiv}, + primaryClass={cs.IR} +}""", + n_samples={"eng": 19919 * 2}, + avg_character_length={"eng": 2768.749235474006}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/News21InstructionRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/News21InstructionRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..fe2059c260802f4c808a4d56e6d955ea9d03141b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/News21InstructionRetrieval.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskInstructionRetrieval import AbsTaskInstructionRetrieval + + +class News21InstructionRetrieval(AbsTaskInstructionRetrieval): + metadata = TaskMetadata( + name="News21InstructionRetrieval", + description="Measuring retrieval instruction following ability on News21 narratives.", + reference="https://arxiv.org/abs/2403.15246", + dataset={ + "path": "jhu-clsp/news21-instructions", + "revision": "e0144086b45fe31ac125e9ac1a83b6a409bb6ca6", + }, + type="InstructionRetrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="p-MRR", + date=("2023-08-01", "2024-04-01"), + form=["written"], + domains=["News"], + task_subtypes=[], + license="MIT", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{weller2024followir, + title={FollowIR: 
Evaluating and Teaching Information Retrieval Models to Follow Instructions}, + author={Orion Weller and Benjamin Chang and Sean MacAvaney and Kyle Lo and Arman Cohan and Benjamin Van Durme and Dawn Lawrie and Luca Soldaini}, + year={2024}, + eprint={2403.15246}, + archivePrefix={arXiv}, + primaryClass={cs.IR} +}""", + n_samples={"eng": 30953 * 2}, + avg_character_length={"eng": 2983.724665391969}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/Robust04InstructionRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/Robust04InstructionRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d5152fba942cc30d2d4df1b61d668b9ea5b4e74a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/Robust04InstructionRetrieval.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskInstructionRetrieval import AbsTaskInstructionRetrieval + + +class Robust04InstructionRetrieval(AbsTaskInstructionRetrieval): + metadata = TaskMetadata( + name="Robust04InstructionRetrieval", + description="Measuring retrieval instruction following ability on Robust04 narratives.", + reference="https://arxiv.org/abs/2403.15246", + dataset={ + "path": "jhu-clsp/robust04-instructions", + "revision": "a5a1c4fe2bc528ac12e83f8cdf82178da85d2f1d", + }, + type="InstructionRetrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="p-MRR", + date=("2023-08-01", "2024-04-01"), + form=["written"], + domains=["News"], + task_subtypes=[], + license="MIT", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{weller2024followir, + title={FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions}, + author={Orion Weller and Benjamin Chang and Sean MacAvaney and Kyle Lo and Arman Cohan and Benjamin Van Durme and Dawn Lawrie and Luca Soldaini}, + year={2024}, + eprint={2403.15246}, + archivePrefix={arXiv}, + primaryClass={cs.IR} +}""", + n_samples={"eng": 47544 * 2}, + avg_character_length={"eng": 2471.0398058252426}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/InstructionRetrieval/eng/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8960e3f446ff1be9afb166a93f7f9e30448d7826 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/__init__.py @@ -0,0 +1,3 @@ +from .mlt.MalteseNewsClassification import * +from .multilingual.MultiEURLEXMultilabelClassification import * +from .por.BrazilianToxicTweetsClassification import * diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/mlt/MalteseNewsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/mlt/MalteseNewsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..eec6fbc0859f95c9a9f42e9018e0d3af1c1d469f --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/mlt/MalteseNewsClassification.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskMultilabelClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class MalteseNewsClassification(AbsTaskMultilabelClassification): + metadata = TaskMetadata( + name="MalteseNewsClassification", + description="""A multi-label topic classification dataset for Maltese News + Articles. The data was collected from the press_mt subset from Korpus + Malti v4.0. Article contents were cleaned to filter out JavaScript, CSS, + & repeated non-Maltese sub-headings. The labels are based on the category + field from this corpus. + """, + reference="https://huggingface.co/datasets/MLRS/maltese_news_categories", + dataset={ + "path": "MLRS/maltese_news_categories", + "revision": "6bb0321659c4f07c4c2176c30c98c971be6571b4", + }, + type="MultilabelClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["mlt-Latn"], + main_score="accuracy", + date=("2023-10-21", "2024-04-24"), + form=["written"], + domains=["Constructed"], + task_subtypes=["Topic classification"], + license="cc-by-nc-sa-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{maltese-news-datasets, + title = "Topic Classification and Headline Generation for {M}altese using a Public News Corpus", + author = "Chaudhary, Amit Kumar and + Micallef, Kurt and + Borg, Claudia", + booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation", + month = may, + year = "2024", + publisher = "Association for Computational Linguistics", + }""", + n_samples={"train": 10784, "test": 2297}, + avg_character_length={"train": 1595.63, "test": 1752.1}, + ) + + def dataset_transform(self): + self.dataset = self.dataset.rename_columns({"labels": "label"}) + remove_cols = [ + col + for col in self.dataset["test"].column_names + if col not in ["text", "label"] + ] + self.dataset = self.dataset.remove_columns(remove_cols) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/multilingual/MultiEURLEXMultilabelClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/multilingual/MultiEURLEXMultilabelClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..8a2a7fb77b9d4e3697f9cc47e6cf676a4672aaec --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/multilingual/MultiEURLEXMultilabelClassification.py @@ -0,0 +1,74 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskMultilabelClassification, MultilingualTask + + +class MultiEURLEXMultilabelClassification( + MultilingualTask, AbsTaskMultilabelClassification +): + metadata = TaskMetadata( + name="MultiEURLEXMultilabelClassification", + dataset={ + "path": "mteb/eurlex-multilingual", + "revision": "2aea5a6dc8fdcfeca41d0fb963c0a338930bde5c", + }, + description="EU laws in 23 EU languages containing gold labels.", + reference="https://huggingface.co/datasets/coastalcph/multi_eurlex", + category="p2p", + type="MultilabelClassification", + eval_splits=["test"], + eval_langs={ + "en": ["eng-Latn"], + "de": ["deu-Latn"], + "fr": ["fra-Latn"], + "it": ["ita-Latn"], + "es": ["spa-Latn"], + "pl": ["pol-Latn"], + "ro": ["ron-Latn"], + 
"nl": ["nld-Latn"], + "el": ["ell-Grek"], + "hu": ["hun-Latn"], + "pt": ["por-Latn"], + "cs": ["ces-Latn"], + "sv": ["swe-Latn"], + "bg": ["bul-Cyrl"], + "da": ["dan-Latn"], + "fi": ["fin-Latn"], + "sk": ["slk-Latn"], + "lt": ["lit-Latn"], + "hr": ["hrv-Latn"], + "sl": ["slv-Latn"], + "et": ["est-Latn"], + "lv": ["lav-Latn"], + "mt": ["mlt-Latn"], + }, + main_score="accuracy", + date=("1958-01-01", "2016-01-01"), + form=["written"], + domains=["Legal", "Government"], + task_subtypes=["Topic classification"], + license="CC BY-SA 4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" +@InProceedings{chalkidis-etal-2021-multieurlex, + author = {Chalkidis, Ilias + and Fergadiotis, Manos + and Androutsopoulos, Ion}, + title = {MultiEURLEX -- A multi-lingual and multi-label legal document + classification dataset for zero-shot cross-lingual transfer}, + booktitle = {Proceedings of the 2021 Conference on Empirical Methods + in Natural Language Processing}, + year = {2021}, + publisher = {Association for Computational Linguistics}, + location = {Punta Cana, Dominican Republic}, + url = {https://arxiv.org/abs/2109.00904} +} + """, + n_samples={"test": 5000}, + avg_character_length={"test": 12014.41}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/por/BrazilianToxicTweetsClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/por/BrazilianToxicTweetsClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..10f2710b62ca3e8400264f2d5d1e169051ff4e1c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/MultiLabelClassification/por/BrazilianToxicTweetsClassification.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +from mteb.abstasks import AbsTaskMultilabelClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class BrazilianToxicTweetsClassification(AbsTaskMultilabelClassification): + metadata = TaskMetadata( + name="BrazilianToxicTweetsClassification", + description=""" + ToLD-Br is the biggest dataset for toxic tweets in Brazilian Portuguese, crowdsourced by 42 annotators selected from + a pool of 129 volunteers. Annotators were selected aiming to create a plural group in terms of demographics (ethnicity, + sexual orientation, age, gender). Each tweet was labeled by three annotators in 6 possible categories: LGBTQ+phobia, + Xenophobia, Obscene, Insult, Misogyny and Racism. + """, + reference="https://paperswithcode.com/dataset/told-br", + dataset={ + "path": "JAugusto97/told-br", + "revision": "fb4f11a5bc68b99891852d20f1ec074be6289768", + "name": "multilabel", + }, + type="MultilabelClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["por-Latn"], + main_score="accuracy", + date=("2019-08-01", "2019-08-16"), + form=["written"], + domains=["Constructed"], + task_subtypes=["Sentiment/Hate speech"], + license="CC BY-SA 4.0", + socioeconomic_status="medium", + annotations_creators="expert-annotated", + dialect=["brazilian"], + text_creation="found", + bibtex_citation="""@article{DBLP:journals/corr/abs-2010-04543, + author = {Joao Augusto Leite and + Diego F. 
Silva and + Kalina Bontcheva and + Carolina Scarton}, + title = {Toxic Language Detection in Social Media for Brazilian Portuguese: + New Dataset and Multilingual Analysis}, + journal = {CoRR}, + volume = {abs/2010.04543}, + year = {2020}, + url = {https://arxiv.org/abs/2010.04543}, + eprinttype = {arXiv}, + eprint = {2010.04543}, + timestamp = {Tue, 15 Dec 2020 16:10:16 +0100}, + }""", + n_samples={"test": 2048}, + avg_character_length={"test": 85.05}, + ) + + def dataset_transform(self): + cols_ = ["homophobia", "obscene", "insult", "racism", "misogyny", "xenophobia"] + n_size = len(self.dataset["train"]) + labels = [[] for _ in range(n_size)] + for c in cols_: + col_list = self.dataset["train"][c] + for i in range(n_size): + if col_list[i] > 0: + labels[i].append(c) + self.dataset = self.dataset["train"].add_column("label", labels) + del labels + self.dataset = self.dataset.remove_columns(cols_) + self.dataset = self.dataset.train_test_split( + train_size=2048, test_size=2048, seed=self.seed + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2352fb79afb838708b5ccebb57f8f80d3c8ca4ce --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/__init__.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from .ara.ArEntail import * +from .ces.CTKFactsNLI import * +from .deu.FalseFriendsDeEnPC import * +from .eng.LegalBenchPC import * +from .eng.SprintDuplicateQuestionsPC import * +from .eng.TwitterSemEval2015PC import * +from .eng.TwitterURLCorpusPC import * +from .fas.FarsTail import * +from .hye.ArmenianParaphrasePC import * +from .ind.IndoNLI import * +from .kor.KlueNLI import * +from .multilingual.OpusparcusPC import * +from .multilingual.PawsX import * +from .multilingual.RTE3 import * +from .multilingual.XNLI import * +from .multilingual.XStance import * +from .pol.PolishPC import * +from .por.Assin2RTE import * +from .por.SickBrPC import * +from .zho.CMTEBPairClassification import * diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ara/ArEntail.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ara/ArEntail.py new file mode 100644 index 0000000000000000000000000000000000000000..9116f3970190862532a70fa9549e9cb9226a10b5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ara/ArEntail.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class ArEntail(AbsTaskPairClassification): + metadata = TaskMetadata( + name="ArEntail", + dataset={ + "path": "arbml/ArEntail", + "revision": "4da4316c6e3287746ab74ff67dd252ad128fceff", + }, + description="A manually-curated Arabic natural language inference dataset from news headlines.", + reference="https://link.springer.com/article/10.1007/s10579-024-09731-1", + type="PairClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["ara-Arab"], + main_score="ap", + date=( + "2020-01-01", + "2024-03-04", + ), # best guess based on google searching random samples + form=["written"], + domains=["News"], + task_subtypes=["Textual Entailment"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + 
bibtex_citation="""@article{obeidat2024arentail, + title={ArEntail: manually-curated Arabic natural language inference dataset from news headlines}, + author={Obeidat, Rasha and Al-Harahsheh, Yara and Al-Ayyoub, Mahmoud and Gharaibeh, Maram}, + journal={Language Resources and Evaluation}, + pages={1--27}, + year={2024}, + publisher={Springer} + }""", + n_samples={"test": 1000}, + avg_character_length={"test": 65.77}, + ) + + def dataset_transform(self): + _dataset = {} + for split in self.metadata.eval_splits: + _dataset[split] = [ + { + "sent1": self.dataset[split]["premise"], + "sent2": self.dataset[split]["hypothesis"], + "labels": self.dataset[split]["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ces/CTKFactsNLI.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ces/CTKFactsNLI.py new file mode 100644 index 0000000000000000000000000000000000000000..96082c0871873c6665b638a5a7517374109cacbe --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ces/CTKFactsNLI.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class CTKFactsNLI(AbsTaskPairClassification): + metadata = TaskMetadata( + name="CTKFactsNLI", + dataset={ + "path": "ctu-aic/ctkfacts_nli", + "revision": "387ae4582c8054cb52ef57ef0941f19bd8012abf", + }, + description="Czech Natural Language Inference dataset of around 3K evidence-claim pairs labelled with SUPPORTS, REFUTES or NOT ENOUGH INFO veracity labels. Extracted from a round of fact-checking experiments.", + reference="https://arxiv.org/abs/2201.11115", + type="PairClassification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["ces-Latn"], + main_score="ap", + date=("2020-09-01", "2021-08-31"), # academic year 2020/2021 + form=["written"], + domains=["News"], + task_subtypes=["Claim verification"], + license="CC-BY-SA-3.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@article{ullrich2023csfever, + title={CsFEVER and CTKFacts: acquiring Czech data for fact verification}, + author={Ullrich, Herbert and Drchal, Jan and R{\`y}par, Martin and Vincourov{\'a}, Hana and Moravec, V{\'a}clav}, + journal={Language Resources and Evaluation}, + volume={57}, + number={4}, + pages={1571--1605}, + year={2023}, + publisher={Springer} + }""", + n_samples={ + "test": 375, + "validation": 305, + }, # after removing label 1=NOT ENOUGH INFO + avg_character_length={"test": 225.62, "validation": 219.32}, + ) + + def dataset_transform(self): + _dataset = {} + self.dataset.pop("train") + # keep labels 0=REFUTES and 2=SUPPORTS, and map them as 0 and 1 for binary classification + hf_dataset = self.dataset.filter(lambda x: x["label"] in [0, 2]) + hf_dataset = hf_dataset.map( + lambda example: {"label": 1 if example["label"] == 2 else 0} + ) + for split in self.metadata.eval_splits: + _dataset[split] = [ + { + "sent1": hf_dataset[split]["evidence"], + "sent2": hf_dataset[split]["claim"], + "labels": hf_dataset[split]["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/deu/FalseFriendsDeEnPC.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/deu/FalseFriendsDeEnPC.py new file mode 100644 index 
0000000000000000000000000000000000000000..4cf5b81d8c6709034b9632b3800ade87d15da43f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/deu/FalseFriendsDeEnPC.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskPairClassification import AbsTaskPairClassification + + +class FalseFriendsDeEnPC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="FalseFriendsGermanEnglish", + description="A dataset to identify False Friends / false cognates between English and German. A generally challenging task for multilingual models.", + reference="https://drive.google.com/file/d/1jgq0nBnV-UiYNxbKNrrr2gxDEHm-DMKH/view?usp=share_link", + dataset={ + "path": "aari1995/false_friends_de_en_mteb", + "revision": "15d6c030d3336cbb09de97b2cefc46db93262d40", + }, + type="PairClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="ap", + date=("2023-08-01", "2023-09-01"), + form=["written"], + domains=[], + task_subtypes=["False Friends"], + license="mit", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="created", + bibtex_citation=""" + @misc{Chibb_2022, + title="German-English False Friends in Multilingual Transformer Models: An Evaluation on Robustness and Word-to-Word Fine-Tuning", + author="Chibb, Aaron", + year="2022", + month="Sep", + abstract="This paper explores the robustness of multilingual language models against false friends. False friends are words that sound or are written the same in two different languages but have different meaning. Generally, it is argued that multilingual models, such as XLM-RoBERTA, can outperform monolingual models in most tasks on conventional datasets. However, false friends are not considered in these tests. In this paper, experiments with a false friends dataset show that multilingual models are not robust against false friends; they have problems creating monolingual representations and differentiating between meanings of similarly written words in different languages. An attempt of word-based finetuning multilingual models on false friends pairs is promising, however the results do not generally solve the presented problem and still, monolingual models are more robust against false friends."
+ } + """, + n_samples={"test": 1524}, + avg_character_length={"test": 40.3}, + ) + + def dataset_transform(self): + _dataset = {} + for split in self.metadata.eval_splits: + hf_dataset = self.dataset[split] + + _dataset[split] = [ + { + "sent1": hf_dataset["sent1"], + "sent2": hf_dataset["sent2"], + "labels": hf_dataset["labels"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/deu/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/deu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/deu/__init__.py @@ -0,0 +1 @@ + diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/LegalBenchPC.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/LegalBenchPC.py new file mode 100644 index 0000000000000000000000000000000000000000..7e6d91fd85f26eb09d83cfbafdf99353013a5fda --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/LegalBenchPC.py @@ -0,0 +1,182 @@ +from __future__ import annotations + +from typing import Any + +import datasets + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +_DATASET_COLUMN_MAP = [ + { + "name": "citation_prediction_classification", + "sent1": "citation", + "sent2": "text", + "labels": "answer", + "mapping": {"yes": 1, "no": 0}, + }, + { + "name": "consumer_contracts_qa", + "sent1": "question", + "sent2": "contract", + "labels": "answer", + "mapping": {"yes": 1, "no": 0}, + }, + { + "name": "contract_qa", + "sent1": "question", + "sent2": "text", + "labels": "answer", + "mapping": {"yes": 1, "no": 0}, + }, + { + "name": "hearsay", + "sent1": "text", + "sent2": "slice", + "labels": "answer", + "mapping": {"yes": 1, "no": 0}, + }, + { + "name": "privacy_policy_entailment", + "sent1": "text", + "sent2": "description", + "labels": "answer", + "mapping": {"correct": 1, "incorrect": 0}, + }, + { + "name": "privacy_policy_qa", + "sent1": "text", + "sent2": "question", + "labels": "answer", + "mapping": {"relevant": 1, "irrelevant": 0}, + }, +] + + +class LegalBenchPC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="LegalBenchPC", + description="""This LegalBench pair classification task is a combination of the following datasets: + + - Citation Prediction Classification: Given a legal statement and a case citation, determine if the citation is supportive of the legal statement. + - Consumer Contracts QA: The task consists of 400 yes/no questions relating to consumer contracts (specifically, online terms of service) and is relevant to the legal skill of contract interpretation. + - Contract QA: Answer yes/no questions about whether contractual clauses discuss particular issues like confidentiality requirements, BIPA consent, PII data breaches, breach of contract etc. + - Hearsay: Classify if a particular piece of evidence qualifies as hearsay. Each sample in the dataset describes (1) an issue being litigated or an assertion a party wishes to prove, and (2) a piece of evidence a party wishes to introduce. The goal is to determine if—as it relates to the issue—the evidence would be considered hearsay under the definition provided above. 
+ - Privacy Policy Entailment: Given a privacy policy clause and a description of the clause, determine if the description is correct. This is a binary classification task in which the LLM is provided with a clause from a privacy policy, and a description of that clause (e.g., “The policy describes collection of the user’s HTTP cookies, flash cookies, pixel tags, or similar identifiers by a party to the contract.”). + - Privacy Policy QA: Given a question and a clause from a privacy policy, determine if the clause contains enough information to answer the question. This is a binary classification task in which the LLM is provided with a question (e.g., “do you publish my data”) and a clause from a privacy policy. The LLM must determine if the clause contains an answer to the question, and classify the question-clause pair. + """, + reference="https://huggingface.co/datasets/nguha/legalbench", + dataset={ + "path": "nguha/legalbench", + "revision": "12ca3b695563788fead87a982ad1a068284413f4", + }, + type="PairClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2000-01-01", "2023-08-23"), # best guess + form=["written"], + domains=["Legal"], + task_subtypes=[], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{guha2023legalbench, + title={LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models}, + author={Neel Guha and Julian Nyarko and Daniel E. Ho and Christopher Ré and Adam Chilton and Aditya Narayana and Alex Chohlas-Wood and Austin Peters and Brandon Waldon and Daniel N. Rockmore and Diego Zambrano and Dmitry Talisman and Enam Hoque and Faiz Surani and Frank Fagan and Galit Sarfaty and Gregory M. Dickinson and Haggai Porat and Jason Hegland and Jessica Wu and Joe Nudell and Joel Niklaus and John Nay and Jonathan H. Choi and Kevin Tobia and Margaret Hagan and Megan Ma and Michael Livermore and Nikon Rasumov-Rahe and Nils Holzenberger and Noam Kolt and Peter Henderson and Sean Rehaag and Sharad Goel and Shang Gao and Spencer Williams and Sunny Gandhi and Tom Zur and Varun Iyer and Zehua Li}, + year={2023}, + eprint={2308.11462}, + archivePrefix={arXiv}, + primaryClass={cs.CL} + } + @article{kolt2022predicting, + title={Predicting consumer contracts}, + author={Kolt, Noam}, + journal={Berkeley Tech. LJ}, + volume={37}, + pages={71}, + year={2022}, + publisher={HeinOnline} + } + @article{zimmeck2019maps, + title={Maps: Scaling privacy compliance analysis to a million apps}, + author={Zimmeck, Sebastian and Story, Peter and Smullen, Daniel and Ravichander, Abhilasha and Wang, Ziqi and Reidenberg, Joel R and Russell, N Cameron and Sadeh, Norman}, + journal={Proc. Priv. 
Enhancing Tech.}, + volume={2019}, + pages={66}, + year={2019} + } + @article{ravichander2019question, + title={Question answering for privacy policies: Combining computational and legal perspectives}, + author={Ravichander, Abhilasha and Black, Alan W and Wilson, Shomir and Norton, Thomas and Sadeh, Norman}, + journal={arXiv preprint arXiv:1911.00841}, + year={2019} + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 287.18}, + ) + + def load_data(self, **kwargs: Any) -> None: + """Load dataset from HuggingFace hub""" + if self.data_loaded: + return + + _hf_dataset = None + for dataset_col_map in _DATASET_COLUMN_MAP: + _dataset = datasets.load_dataset( + self.metadata_dict["dataset"]["path"], + dataset_col_map["name"], + revision=self.metadata_dict["dataset"]["revision"], + trust_remote_code=True, + ) + + _dataset = _dataset.rename_columns( + { + dataset_col_map["sent1"]: "sent1", + dataset_col_map["sent2"]: "sent2", + dataset_col_map["labels"]: "labels", + } + ) + _dataset = _dataset.select_columns(["labels", "sent1", "sent2"]) + mapping = dataset_col_map["mapping"] + _dataset = _dataset.map( + lambda example: { + "labels": mapping.get(example["labels"].lower(), example["labels"]) + } + ) + + if _hf_dataset is None: + _hf_dataset = _dataset + else: + _hf_dataset["train"] = datasets.concatenate_datasets( + [_hf_dataset["train"], _dataset["train"]] + ) + _hf_dataset["test"] = datasets.concatenate_datasets( + [_hf_dataset["test"], _dataset["test"]] + ) + + self.dataset = _hf_dataset + self.dataset_transform() + self.data_loaded = True + + def dataset_transform(self): + self.dataset = self.stratified_subsampling( + self.dataset, seed=self.seed, splits=["test"], label="labels" + ) + + _dataset = {} + for split in self.metadata.eval_splits: + hf_dataset = self.dataset[split] + _dataset[split] = [ + { + "sent1": hf_dataset["sent1"], + "sent2": hf_dataset["sent2"], + "labels": hf_dataset["labels"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/SprintDuplicateQuestionsPC.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/SprintDuplicateQuestionsPC.py new file mode 100644 index 0000000000000000000000000000000000000000..726716d87f089b4d0c4bee732bf79626ffc89bd8 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/SprintDuplicateQuestionsPC.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskPairClassification import AbsTaskPairClassification + + +class SprintDuplicateQuestionsPC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="SprintDuplicateQuestions", + description="Duplicate questions from the Sprint community.", + reference="https://www.aclweb.org/anthology/D18-1131/", + dataset={ + "path": "mteb/sprintduplicatequestions-pairclassification", + "revision": "d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46", + }, + type="PairClassification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["eng-Latn"], + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"validation": 101000, "test": 101000}, + avg_character_length={"validation": 65.2, "test": 67.9}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/TwitterSemEval2015PC.py 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/TwitterSemEval2015PC.py new file mode 100644 index 0000000000000000000000000000000000000000..56bbfc4be45626e254ec8f27d2277dd91b5f9fcd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/TwitterSemEval2015PC.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskPairClassification import AbsTaskPairClassification + + +class TwitterSemEval2015PC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="TwitterSemEval2015", + dataset={ + "path": "mteb/twittersemeval2015-pairclassification", + "revision": "70970daeab8776df92f5ea462b6173c0b46fd2d1", + }, + description="Paraphrase-Pairs of Tweets from the SemEval 2015 workshop.", + reference="https://alt.qcri.org/semeval2015/task1/", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 16777}, + avg_character_length={"test": 38.3}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/TwitterURLCorpusPC.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/TwitterURLCorpusPC.py new file mode 100644 index 0000000000000000000000000000000000000000..1796e254ac9c96a153799ffed6f33bf9c3ed85b0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/TwitterURLCorpusPC.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskPairClassification import AbsTaskPairClassification + + +class TwitterURLCorpusPC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="TwitterURLCorpus", + dataset={ + "path": "mteb/twitterurlcorpus-pairclassification", + "revision": "8b6510b0b1fa4e4c4f879467980e9be563ec1cdf", + }, + description="Paraphrase-Pairs of Tweets.", + reference="https://languagenet.github.io/", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 51534}, + avg_character_length={"test": 79.5}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/eng/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/fas/FarsTail.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/fas/FarsTail.py new file mode 100644 index 0000000000000000000000000000000000000000..956f6381cb1e5f8e7a3f9bafd8b935f5ca8d9fd7 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/fas/FarsTail.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class FarsTail(AbsTaskPairClassification): + metadata = 
TaskMetadata( + name="FarsTail", + dataset={ + "path": "azarijafari/FarsTail", + "revision": "7335288588f14e5a687d97fc979194c2abe6f4e7", + }, + description="This dataset, named FarsTail, includes 10,367 samples which are provided in both the Persian language as well as the indexed format to be useful for non-Persian researchers. The samples are generated from 3,539 multiple-choice questions with the least amount of annotator interventions in a way similar to the SciTail dataset", + reference="https://link.springer.com/article/10.1007/s00500-023-08959-3", + type="PairClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["fas-Arab"], + main_score="ap", + date=("2021-01-01", "2021-07-12"), # best guess + form=["written"], + domains=["Academic"], + task_subtypes=["Textual Entailment"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@article{amirkhani2023farstail, + title={FarsTail: a Persian natural language inference dataset}, + author={Amirkhani, Hossein and AzariJafari, Mohammad and Faridan-Jahromi, Soroush and Kouhkan, Zeinab and Pourjafari, Zohreh and Amirak, Azadeh}, + journal={Soft Computing}, + year={2023}, + publisher={Springer}, + doi={10.1007/s00500-023-08959-3} + }""", + n_samples={"test": 1029}, # after removing neutral + avg_character_length={"test": 125.84}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + path = self.metadata_dict["dataset"]["path"] + revision = self.metadata_dict["dataset"]["revision"] + data_files = { + "test": f"https://huggingface.co/datasets/{path}/resolve/{revision}/data/Test-word.csv" + } + self.dataset = datasets.load_dataset( + "csv", data_files=data_files, delimiter="\t" + ) + self.dataset_transform() + self.data_loaded = True + + def dataset_transform(self): + _dataset = {} + self.dataset = self.dataset.filter(lambda x: x["label"] != "n") + self.dataset = self.dataset.map( + lambda example: {"label": 1 if example["label"] == "e" else 0} + ) + for split in self.metadata.eval_splits: + _dataset[split] = [ + { + "sent1": self.dataset[split]["premise"], + "sent2": self.dataset[split]["hypothesis"], + "labels": self.dataset[split]["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/hye/ArmenianParaphrasePC.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/hye/ArmenianParaphrasePC.py new file mode 100644 index 0000000000000000000000000000000000000000..aac196158e37b27b6ead598831f17b1cddfaf990 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/hye/ArmenianParaphrasePC.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class ArmenianParaphrasePC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="ArmenianParaphrasePC", + description="asparius/Armenian-Paraphrase-PC", + reference="https://github.com/ivannikov-lab/arpa-paraphrase-corpus", + dataset={ + "path": "asparius/Armenian-Paraphrase-PC", + "revision": "f43b4f32987048043a8b31e5e26be4d360c2438f", + }, + type="PairClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["hye-Armn"], + main_score="ap", + date=("2021-01-01", "2022-04-06"), + form=["written"], + domains=["News"], + task_subtypes=[], + license="Apache-2.0", + socioeconomic_status="mixed", + 
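# FarsTail above sidesteps the dataset's own loading script by reading the raw
# tab-separated file through the generic "csv" builder, pinned to a revision
# via the Hub's resolve URL. A standalone sketch of just that loading step
# (path, revision and file name are the ones used by the task above):
import datasets

path = "azarijafari/FarsTail"
revision = "7335288588f14e5a687d97fc979194c2abe6f4e7"
url = f"https://huggingface.co/datasets/{path}/resolve/{revision}/data/Test-word.csv"

ds = datasets.load_dataset("csv", data_files={"test": url}, delimiter="\t")
print(ds["test"].column_names)  # the transform above expects premise, hypothesis, label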
annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @misc{malajyan2020arpa, + title={ARPA: Armenian Paraphrase Detection Corpus and Models}, + author={Arthur Malajyan and Karen Avetisyan and Tsolak Ghukasyan}, + year={2020}, + eprint={2009.12615}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} + """, + n_samples={"train": 4023, "test": 1470}, + avg_character_length={"train": 243.81, "test": 241.37}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/hye/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/hye/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ind/IndoNLI.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ind/IndoNLI.py new file mode 100644 index 0000000000000000000000000000000000000000..f0661879759a3579167390efc74a3e26f6a9cab5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/ind/IndoNLI.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class IndoNLI(AbsTaskPairClassification): + metadata = TaskMetadata( + name="indonli", + dataset={ + "path": "afaji/indonli", + "revision": "3c976110fc13596004dc36279fc4c453ff2c18aa", + }, + description="IndoNLI is the first human-elicited Natural Language Inference (NLI) dataset for Indonesian. IndoNLI is annotated by both crowd workers and experts.", + reference="https://link.springer.com/chapter/10.1007/978-3-030-41505-1_39", + type="PairClassification", + category="s2s", + eval_splits=["test_expert"], + eval_langs=["ind-Latn"], + main_score="ap", + date=("2021-01-01", "2021-11-01"), # best guess + form=["written"], + domains=["Encyclopaedic", "Web", "News"], + task_subtypes=["Textual Entailment"], + license="CC-BY-SA 4.0", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{mahendra-etal-2021-indonli, + title = "{I}ndo{NLI}: A Natural Language Inference Dataset for {I}ndonesian", + author = "Mahendra, Rahmad and Aji, Alham Fikri and Louvan, Samuel and Rahman, Fahrurrozi and Vania, Clara", + booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", + month = nov, + year = "2021", + address = "Online and Punta Cana, Dominican Republic", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2021.emnlp-main.821", + pages = "10511--10527", + }""", + n_samples={"test_expert": 2040}, # after removing neutral + avg_character_length={"test_expert": 145.88}, + ) + + def dataset_transform(self): + _dataset = {} + for split in self.metadata.eval_splits: + # keep labels 0=entailment and 2=contradiction, and map them as 1 and 0 for binary classification + hf_dataset = self.dataset[split].filter(lambda x: x["label"] in [0, 2]) + hf_dataset = hf_dataset.map( + lambda example: {"label": 0 if example["label"] == 2 else 1} + ) + _dataset[split] = [ + { + "sent1": hf_dataset["premise"], + "sent2": hf_dataset["hypothesis"], + "labels": hf_dataset["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/kor/KlueNLI.py 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/kor/KlueNLI.py new file mode 100644 index 0000000000000000000000000000000000000000..6d0a6fa0731c88e1c81ebf55b87e6787db8ef6f3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/kor/KlueNLI.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class KlueNLI(AbsTaskPairClassification): + metadata = TaskMetadata( + name="KLUE-NLI", + dataset={ + "path": "klue", + "name": "nli", + "revision": "349481ec73fff722f88e0453ca05c77a447d967c", + }, + description="Textual Entailment between a hypothesis sentence and a premise sentence. Part of the Korean Language Understanding Evaluation (KLUE).", + reference="https://arxiv.org/abs/2105.09680", + type="PairClassification", + category="s2s", + eval_splits=["validation"], + eval_langs=["kor-Hang"], + main_score="ap", + date=("2016-01-01", "2020-12-31"), + form=["written"], + domains=["News", "Encyclopaedic"], + task_subtypes=["Textual Entailment"], + license="CC-BY-SA-4.0", + socioeconomic_status="high", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{park2021klue, + title={KLUE: Korean Language Understanding Evaluation}, + author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho}, + year={2021}, + eprint={2105.09680}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +}""", + n_samples={"validation": 2000}, # 3000 - neutral samples + avg_character_length={"validation": 35.01}, + ) + + def dataset_transform(self): + _dataset = {} + for split in self.metadata.eval_splits: + # keep labels 0=entailment and 2=contradiction, and map them as 1 and 0 for binary classification + hf_dataset = self.dataset[split].filter(lambda x: x["label"] in [0, 2]) + hf_dataset = hf_dataset.map( + lambda example: {"label": 0 if example["label"] == 2 else 1} + ) + _dataset[split] = [ + { + "sent1": hf_dataset["premise"], + "sent2": hf_dataset["hypothesis"], + "labels": hf_dataset["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/OpusparcusPC.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/OpusparcusPC.py new file mode 100644 index 0000000000000000000000000000000000000000..4771763bd08f31aaead13916756c4a15afecdf17 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/OpusparcusPC.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskPairClassification, MultilingualTask + +_LANGUAGES = { + "de": ["deu-Latn"], + "en": ["eng-Latn"], + "fi": ["fin-Latn"], + "fr": ["fra-Latn"], + "ru": ["rus-Cyrl"], + "sv": ["swe-Latn"], +} + + +class OpusparcusPC(AbsTaskPairClassification, MultilingualTask): + metadata = TaskMetadata( + name="OpusparcusPC", + dataset={ + "path": 
"GEM/opusparcus", + "revision": "9e9b1f8ef51616073f47f306f7f47dd91663f86a", + }, + description="Opusparcus is a paraphrase corpus for six European language: German, English, Finnish, French, Russian, and Swedish. The paraphrases consist of subtitles from movies and TV shows.", + reference="https://gem-benchmark.com/data_cards/opusparcus", + category="s2s", + type="PairClassification", + eval_splits=["test.full", "validation.full"], + eval_langs=_LANGUAGES, + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + """Load dataset from HuggingFace hub""" + if self.data_loaded: + return + self.dataset = {} + for lang in self.hf_subsets: + self.dataset[lang] = datasets.load_dataset( + lang=lang, + quality=100, + **self.metadata_dict["dataset"], + ) + self.dataset_transform(lang) + self.data_loaded = True + + def dataset_transform(self, lang): + for split in self.dataset[lang]: + # Renaming features + labels = self.dataset[lang][split]["annot_score"] + sent1 = self.dataset[lang][split]["input"] + sent2 = self.dataset[lang][split]["target"] + new_dict = {} + # Labels are a score between 1.0 and 4.0, and we need binary classification + labels = [ + 0 if label < 2.5 else 1 if label > 2.5 else 2.5 for label in labels + ] + # Get neutral label to delete them + neutral = [i for i, val in enumerate(labels) if val == 2.5] + for i in sorted(neutral, reverse=True): + del labels[i] + del sent1[i] + del sent2[i] + new_dict["labels"] = [labels] + new_dict["sent1"] = [sent1] + new_dict["sent2"] = [sent2] + self.dataset[lang][split] = datasets.Dataset.from_dict(new_dict) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/PawsX.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/PawsX.py new file mode 100644 index 0000000000000000000000000000000000000000..5af4e9f77d860328139932b32b7ddaba45f8e3e0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/PawsX.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskPairClassification import AbsTaskPairClassification + + +class PawsX(MultilingualTask, AbsTaskPairClassification): + metadata = TaskMetadata( + name="PawsX", + dataset={ + "path": "paws-x", + "revision": "8a04d940a42cd40658986fdd8e3da561533a3646", + }, + description="", + reference="https://arxiv.org/abs/1908.11828", + category="s2s", + type="PairClassification", + eval_splits=["test", "validation"], + eval_langs={ + "de": ["deu-Latn"], + "en": ["eng-Latn"], + "es": ["spa-Latn"], + "fr": ["fra-Latn"], + "ja": ["jpn-Hira"], + "ko": ["kor-Hang"], + "zh": ["cmn-Hans"], + }, + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def dataset_transform(self): + _dataset = {} + for lang in self.hf_subsets: + _dataset[lang] = {} + for split in self.metadata.eval_splits: + hf_dataset = self.dataset[lang][split] + + _dataset[lang][split] = [ + { + "sent1": hf_dataset["sentence1"], + "sent2": hf_dataset["sentence2"], + 
"labels": hf_dataset["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/RTE3.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/RTE3.py new file mode 100644 index 0000000000000000000000000000000000000000..56a77c99a3dc69d1303dc3a5eede40dc645190f5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/RTE3.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks import MultilingualTask +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +_LANGS = { + "de": ["deu-Latn"], + "en": ["eng-Latn"], + "fr": ["fra-Latn"], + "it": ["ita-Latn"], +} + + +class RTE3(MultilingualTask, AbsTaskPairClassification): + metadata = TaskMetadata( + name="RTE3", + dataset={ + "path": "maximoss/rte3-multi", + "revision": "d94f96ca5a6798e20f5a77e566f7a288dc6138d7", + }, + description="Recognising Textual Entailment Challenge (RTE-3) aim to provide the NLP community with a benchmark to test progress in recognizing textual entailment", + reference="https://aclanthology.org/W07-1401/", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs=_LANGS, + main_score="ap", + date=("2023-03-25", "2024-04-15"), + form=["written"], + domains=["News", "Web", "Encyclopaedic"], + task_subtypes=["Textual Entailment"], + license="cc-by-4.0", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{giampiccolo-etal-2007-third, + title = "The Third {PASCAL} Recognizing Textual Entailment Challenge", + author = "Giampiccolo, Danilo and + Magnini, Bernardo and + Dagan, Ido and + Dolan, Bill", + booktitle = "Proceedings of the {ACL}-{PASCAL} Workshop on Textual Entailment and Paraphrasing", + month = jun, + year = "2007", + address = "Prague", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/W07-1401", + pages = "1--9", + } + """, + n_samples={"test": 1923}, # sum of 4 languages after neutral filtering + avg_character_length={"test": 124.79}, + ) + + def load_data(self, **kwargs): + """Load dataset from HuggingFace hub""" + if self.data_loaded: + return + self.dataset = datasets.load_dataset( + self.metadata.dataset["path"], revision=self.metadata.dataset["revision"] + ) + self.dataset_transform() + self.data_loaded = True + + def dataset_transform(self): + _dataset = {} + for lang in self.langs: + _dataset[lang] = {} + for split in self.metadata.eval_splits: + # keep target language + hf_dataset = self.dataset[split].filter(lambda x: x["language"] == lang) + # keep labels 0=entailment and 2=contradiction, and map them as 1 and 0 for binary classification + hf_dataset = hf_dataset.filter(lambda x: x["label"] in [0, 2]) + hf_dataset = hf_dataset.map( + lambda example: {"label": 0 if example["label"] == 2 else 1} + ) + _dataset[lang][split] = [ + { + "sent1": hf_dataset["premise"], + "sent2": hf_dataset["hypothesis"], + "labels": hf_dataset["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/XNLI.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/XNLI.py new file mode 100644 index 0000000000000000000000000000000000000000..357c74f685f58211847e7b8210bc93bcc1276780 --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/XNLI.py @@ -0,0 +1,174 @@ +from __future__ import annotations + +from mteb.abstasks import MultilingualTask +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +_LANGS = { + "ar": ["ara-Arab"], + "bg": ["bul-Cyrl"], + "de": ["deu-Latn"], + "el": ["ell-Grek"], + "en": ["eng-Latn"], + "es": ["spa-Latn"], + "fr": ["fra-Latn"], + "hi": ["hin-Deva"], + "ru": ["rus-Cyrl"], + "sw": ["swa-Latn"], + "th": ["tha-Thai"], + "tr": ["tur-Latn"], + "vi": ["vie-Latn"], + "zh": ["zho-Hans"], +} + + +class XNLI(MultilingualTask, AbsTaskPairClassification): + metadata = TaskMetadata( + name="XNLI", + dataset={ + "path": "mteb/xnli", + "revision": "09698e0180d87dc247ca447d3a1248b931ac0cdb", + }, + description="", + reference="https://aclanthology.org/D18-1269/", + category="s2s", + type="PairClassification", + eval_splits=["test", "validation"], + eval_langs=_LANGS, + main_score="ap", + date=("2018-01-01", "2018-11-04"), + form=["written"], + domains=["Non-fiction", "Fiction", "Government"], + task_subtypes=[], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + text_creation="created", + bibtex_citation="""@InProceedings{conneau2018xnli, + author = {Conneau, Alexis + and Rinott, Ruty + and Lample, Guillaume + and Williams, Adina + and Bowman, Samuel R. + and Schwenk, Holger + and Stoyanov, Veselin}, + title = {XNLI: Evaluating Cross-lingual Sentence Representations}, + booktitle = {Proceedings of the 2018 Conference on Empirical Methods + in Natural Language Processing}, + year = {2018}, + publisher = {Association for Computational Linguistics}, + location = {Brussels, Belgium}, + } + """, + n_samples={"validation": 2163, "test": 2460}, + avg_character_length={"validation": 106.5, "test": 106.5}, + ) + + def dataset_transform(self): + _dataset = {} + for lang in self.hf_subsets: + _dataset[lang] = {} + self.dataset[lang] = self.stratified_subsampling( + self.dataset[lang], seed=self.seed, splits=self.metadata.eval_splits + ) + for split in self.metadata.eval_splits: + # 0=entailment, 2=contradiction. Filter out neutral to match the task. + # Then map entailment as positive (1) and contradiction as negative (0). 
+ hf_dataset = self.dataset[lang][split].filter( + lambda x: x["label"] in [0, 2] + ) + hf_dataset = hf_dataset.map( + lambda example: {"label": 0 if example["label"] == 2 else 1} + ) + + _dataset[lang][split] = [ + { + "sent1": hf_dataset["premise"], + "sent2": hf_dataset["hypothesis"], + "labels": hf_dataset["label"], + } + ] + self.dataset = _dataset + + +_LANGS_2 = { + "punjabi": ["pan-Guru"], + "gujrati": ["guj-Gujr"], + "kannada": ["kan-Knda"], + "assamese": ["asm-Beng"], + "bengali": ["ben-Beng"], + "marathi": ["mar-Deva"], + "bhojpuri": ["bho-Deva"], + "odiya": ["ory-Orya"], + "sanskrit": ["san-Deva"], + "tamil": ["tam-Taml"], + "turkish": ["tur-Latn"], + "greek": ["ell-Grek"], + "russian": ["rus-Cyrl"], +} + + +class XNLIV2(MultilingualTask, AbsTaskPairClassification): + metadata = TaskMetadata( + name="XNLIV2", + dataset={ + "path": "mteb/xnli2.0-multi-pair", + "revision": "5b7d477a8c62cdd18e2fed7e015497c20b4371ad", + }, + description=""" + This is subset of 'XNLI 2.0: Improving XNLI dataset and performance on Cross Lingual Understanding' + with languages that were not part of the original XNLI plus three (verified) languages that are not strongly covered in MTEB + """, + reference="https://arxiv.org/pdf/2301.06527", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs=_LANGS_2, + main_score="ap", + date=("2018-01-01", "2018-11-04"), + form=["written"], + domains=["Non-fiction", "Fiction", "Government"], + task_subtypes=[], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + text_creation="machine-translated and verified", + bibtex_citation="""@inproceedings{upadhyay2023xnli, + title={XNLI 2.0: Improving XNLI dataset and performance on Cross Lingual Understanding (XLU)}, + author={Upadhyay, Ankit Kumar and Upadhya, Harsit Kumar}, + booktitle={2023 IEEE 8th International Conference for Convergence in Technology (I2CT)}, + pages={1--6}, + year={2023}, + organization={IEEE} + } + """, + n_samples={"test": 5010}, + avg_character_length={"test": 80.06}, # average of premise and hypothesis + ) + + def dataset_transform(self): + _dataset = {} + for lang in self.langs: + _dataset[lang] = {} + self.dataset[lang] = self.stratified_subsampling( + self.dataset[lang], seed=self.seed, splits=self.metadata.eval_splits + ) + for split in self.metadata.eval_splits: + # 0=entailment, 2=contradiction. Filter out neutral to match the task. + # Then map entailment as positive (1) and contradiction as negative (0). 
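# XNLI above, and several tasks below, call self.stratified_subsampling(...),
# which is defined on the abstract task base class and not shown in this diff.
# A hedged sketch of the idea only, using datasets' built-in stratified split:
# cap a split at N examples while keeping the label distribution intact.
from datasets import Dataset

ds = Dataset.from_dict(
    {"text": [f"t{i}" for i in range(100)], "labels": [i % 2 for i in range(100)]}
)
ds = ds.class_encode_column("labels")  # stratification needs a ClassLabel column
subsampled = ds.train_test_split(test_size=10, stratify_by_column="labels", seed=42)["test"]
assert sum(subsampled["labels"]) == 5  # the 50/50 balance survives subsampling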
+ hf_dataset = self.dataset[lang][split].filter( + lambda x: x["label"] in [0, 2] + ) + hf_dataset = hf_dataset.map( + lambda example: {"label": 0 if example["label"] == 2 else 1} + ) + _dataset[lang][split] = [ + { + "sent1": hf_dataset["premise"], + "sent2": hf_dataset["hypothesis"], + "labels": hf_dataset["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/XStance.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/XStance.py new file mode 100644 index 0000000000000000000000000000000000000000..19efac606502cd5afc978c7f5b8896313b8c243a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/XStance.py @@ -0,0 +1,108 @@ +from __future__ import annotations + +from datasets import load_dataset +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskPairClassification import AbsTaskPairClassification + + +class XStance(MultilingualTask, AbsTaskPairClassification): + metadata = TaskMetadata( + name="XStance", + dataset={ + "path": "ZurichNLP/x_stance", + "revision": "810604b9ad3aafdc6144597fdaa40f21a6f5f3de", + }, + description="A Multilingual Multi-Target Dataset for Stance Detection in French, German, and Italian.", + reference="https://github.com/ZurichNLP/xstance", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs={ + "de": ["deu-Latn"], + "fr": ["fra-Latn"], + "it": ["ita-Latn"], + }, + main_score="ap", + date=("2011-01-01", "2020-12-31"), + form=["written"], + domains=["Social"], + task_subtypes=["Political classification"], + license="cc by-nc 4.0", + socioeconomic_status="medium", + annotations_creators="human-annotated", + dialect=[], + text_creation="created", + bibtex_citation=""" + @inproceedings{vamvas2020xstance, + author = "Vamvas, Jannis and Sennrich, Rico", + title = "{X-Stance}: A Multilingual Multi-Target Dataset for Stance Detection", + booktitle = "Proceedings of the 5th Swiss Text Analytics Conference (SwissText) 16th Conference on Natural Language Processing (KONVENS)", + address = "Zurich, Switzerland", + year = "2020", + month = "jun", + url = "http://ceur-ws.org/Vol-2624/paper9.pdf" + } + """, + n_samples={"test": 2048}, + avg_character_length={"test": 152.41}, # length of`sent1` + `sent2` + ) + + def load_data(self, **kwargs): + """Load dataset from HuggingFace hub""" + if self.data_loaded: + return + + max_n_samples = 2048 + self.dataset = {} + path = self.metadata_dict["dataset"]["path"] + revision = self.metadata_dict["dataset"]["revision"] + raw_dataset = load_dataset(path, revision=revision) + + def convert_example(example): + return { + "sent1": example["question"], + "sent2": example["comment"], + "labels": 1 if example["label"] == "FAVOR" else 0, + } + + for lang in self.metadata.eval_langs: + self.dataset[lang] = {} + for split in self.metadata_dict["eval_splits"]: + # filter by language + self.dataset[lang][split] = raw_dataset[split].filter( + lambda row: row["language"] == lang + ) + + # reduce samples + if len(self.dataset[lang][split]) > max_n_samples: + # only de + fr are larger than 2048 samples + self.dataset[lang][split] = self.dataset[lang][split].select( + range(max_n_samples) + ) + + # convert examples + self.dataset[lang][split] = self.dataset[lang][split].map( + convert_example, + remove_columns=self.dataset[lang][split].column_names, + ) + + self.dataset_transform() + self.data_loaded = 
True + + def dataset_transform(self): + """Transform dataset into sentence-pair format""" + _dataset = {} + + for lang in self.metadata.eval_langs: + _dataset[lang] = {} + for split in self.metadata.eval_splits: + _dataset[lang][split] = [ + { + "sent1": self.dataset[lang][split]["sent1"], + "sent2": self.dataset[lang][split]["sent2"], + "labels": self.dataset[lang][split]["labels"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/multilingual/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/pol/PolishPC.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/pol/PolishPC.py new file mode 100644 index 0000000000000000000000000000000000000000..8ecee8913f86a1a09feda2bb71c484ac6f534dcd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/pol/PolishPC.py @@ -0,0 +1,121 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskPairClassification import AbsTaskPairClassification + + +class SickePLPC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="SICK-E-PL", + dataset={ + "path": "PL-MTEB/sicke-pl-pairclassification", + "revision": "71bba34b0ece6c56dfcf46d9758a27f7a90f17e9", + }, + description="Polish version of SICK dataset for textual entailment.", + reference="https://aclanthology.org/2020.lrec-1.207", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + +class PpcPC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="PpcPC", + dataset={ + "path": "PL-MTEB/ppc-pairclassification", + "revision": "2c7d2df57801a591f6b1e3aaf042e7a04ec7d9f2", + }, + description="Polish Paraphrase Corpus", + reference="https://arxiv.org/pdf/2207.12759.pdf", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + +class CdscePC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="CDSC-E", + dataset={ + "path": "PL-MTEB/cdsce-pairclassification", + "revision": "0a3d4aa409b22f80eb22cbf59b492637637b536d", + }, + description="Compositional Distributional Semantics Corpus for textual entailment.", + reference="https://aclanthology.org/P17-1073.pdf", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + +class PscPC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="PSC", + dataset={ + "path": "PL-MTEB/psc-pairclassification", + 
"revision": "d05a294af9e1d3ff2bfb6b714e08a24a6cabc669", + }, + description="Polish Summaries Corpus", + reference="http://www.lrec-conf.org/proceedings/lrec2014/pdf/1211_Paper.pdf", + category="s2s", + type="PairClassification", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ap", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/pol/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/pol/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/por/Assin2RTE.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/por/Assin2RTE.py new file mode 100644 index 0000000000000000000000000000000000000000..8f39221022ee8fd3f2191337928f20e6f8b26833 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/por/Assin2RTE.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class Assin2RTE(AbsTaskPairClassification): + metadata = TaskMetadata( + name="Assin2RTE", + dataset={ + "path": "nilc-nlp/assin2", + "revision": "0ff9c86779e06855536d8775ce5550550e1e5a2d", + }, + description="Recognizing Textual Entailment part of the ASSIN 2, an evaluation shared task collocated with STIL 2019.", + reference="https://link.springer.com/chapter/10.1007/978-3-030-41505-1_39", + type="PairClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["por-Latn"], + main_score="ap", + date=("2019-01-01", "2019-09-16"), # best guess + form=["written"], + domains=[], + task_subtypes=["Textual Entailment"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{real2020assin, + title={The assin 2 shared task: a quick overview}, + author={Real, Livy and Fonseca, Erick and Oliveira, Hugo Goncalo}, + booktitle={International Conference on Computational Processing of the Portuguese Language}, + pages={406--412}, + year={2020}, + organization={Springer} + }""", + n_samples={"test": 2448}, + avg_character_length={"test": 53.55}, + ) + + def dataset_transform(self): + _dataset = {} + self.dataset = self.stratified_subsampling( + self.dataset, + seed=self.seed, + splits=self.metadata.eval_splits, + label="entailment_judgment", + ) + for split in self.metadata.eval_splits: + _dataset[split] = [ + { + "sent1": self.dataset[split]["premise"], + "sent2": self.dataset[split]["hypothesis"], + "labels": self.dataset[split]["entailment_judgment"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/por/SickBrPC.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/por/SickBrPC.py new file mode 100644 index 0000000000000000000000000000000000000000..c1212f1ffa068b9b7fb8e98678c4fce0443dde6b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/por/SickBrPC.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskPairClassification import 
AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + +N_SAMPLES = 1000 + + +class SickBrPC(AbsTaskPairClassification): + metadata = TaskMetadata( + name="SICK-BR-PC", + dataset={ + "path": "eduagarcia/sick-br", + "revision": "0cdfb1d51ef339011c067688a3b75b82f927c097", + }, + description="SICK-BR is a Portuguese inference corpus, human translated from SICK", + reference="https://linux.ime.usp.br/~thalen/SICK_PT.pdf", + type="PairClassification", + category="s2s", + eval_splits=["test"], + eval_langs=["por-Latn"], + main_score="ap", + date=("2018-01-01", "2018-09-01"), # rough estimate + form=["written"], + domains=["Web"], + task_subtypes=["Textual Entailment"], + license="unknown", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="human-translated and localized", + bibtex_citation=""" +@inproceedings{real18, + author="Real, Livy + and Rodrigues, Ana + and Vieira e Silva, Andressa + and Albiero, Beatriz + and Thalenberg, Bruna + and Guide, Bruno + and Silva, Cindy + and de Oliveira Lima, Guilherme + and C{\^a}mara, Igor C. S. + and Stanojevi{\'{c}}, Milo{\v{s}} + and Souza, Rodrigo + and de Paiva, Valeria" + year ="2018", + title="SICK-BR: A Portuguese Corpus for Inference", + booktitle="Computational Processing of the Portuguese Language. PROPOR 2018.", + doi ="10.1007/978-3-319-99722-3_31", + isbn="978-3-319-99722-3" +} + """, + n_samples={"test": N_SAMPLES}, + avg_character_length={"test": 54.89}, + ) + + def dataset_transform(self): + _dataset = {} + + # Do not process the subsets we won't use + self.dataset.pop("train") + self.dataset.pop("validation") + + self.dataset = self.stratified_subsampling( + self.dataset, + seed=self.seed, + splits=self.metadata.eval_splits, + label="entailment_label", + n_samples=N_SAMPLES, + ) + + for split in self.metadata.eval_splits: + # keep labels 0=entailment and 2=contradiction, and map them as 1 and 0 for binary classification + hf_dataset = self.dataset[split].filter( + lambda x: x["entailment_label"] in [0, 2] + ) + hf_dataset = hf_dataset.map( + lambda example: {"label": 0 if example["entailment_label"] == 2 else 1} + ) + _dataset[split] = [ + { + "sent1": hf_dataset["sentence_A"], + "sent2": hf_dataset["sentence_B"], + "labels": hf_dataset["label"], + } + ] + self.dataset = _dataset diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/zho/CMTEBPairClassification.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/zho/CMTEBPairClassification.py new file mode 100644 index 0000000000000000000000000000000000000000..062eebdf390e4861f8825cfab86f8ea7de465373 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/zho/CMTEBPairClassification.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskPairClassification import AbsTaskPairClassification +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class Ocnli(AbsTaskPairClassification): + metadata = TaskMetadata( + name="Ocnli", + description="Original Chinese Natural Language Inference dataset", + reference="https://arxiv.org/abs/2010.05444", + dataset={ + "path": "C-MTEB/OCNLI", + "revision": "66e76a618a34d6d565d5538088562851e6daa7ec", + }, + type="PairClassification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["cmn-Hans"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, +
socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + +class Cmnli(AbsTaskPairClassification): + metadata = TaskMetadata( + name="Cmnli", + description="Chinese Multi-Genre NLI", + reference="https://huggingface.co/datasets/clue/viewer/cmnli", + dataset={ + "path": "C-MTEB/CMNLI", + "revision": "41bc36f332156f7adc9e38f53777c959b2ae9766", + }, + type="PairClassification", + category="s2s", + eval_splits=["validation", "test"], + eval_langs=["cmn-Hans"], + main_score="accuracy", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/zho/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/PairClassification/zho/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0461aa6fdb44fa21a9b5391dfdf8e033344925d6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/__init__.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from .eng.AskUbuntuDupQuestions import * +from .eng.MindSmallReranking import * +from .eng.SciDocsReranking import * +from .eng.StackOverflowDupQuestions import * +from .fra.AlloprofReranking import * +from .fra.SyntecReranking import * +from .multilingual.MIRACLReranking import * +from .multilingual.WikipediaRerankingMultilingual import * +from .zho.CMTEBReranking import * diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/AskUbuntuDupQuestions.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/AskUbuntuDupQuestions.py new file mode 100644 index 0000000000000000000000000000000000000000..40ec8f870802a3d478c218511fc078fb260d5ed4 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/AskUbuntuDupQuestions.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskReranking import AbsTaskReranking + + +class AskUbuntuDupQuestions(AbsTaskReranking): + metadata = TaskMetadata( + name="AskUbuntuDupQuestions", + description="AskUbuntu Question Dataset - Questions from AskUbuntu with manual annotations marking pairs of questions as similar or non-similar", + reference="https://github.com/taolei87/askubuntu", + dataset={ + "path": "mteb/askubuntudupquestions-reranking", + "revision": "2000358ca161889fa9c082cb41daa8dcfb161a54", + }, + type="Reranking", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="map", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 2255}, + avg_character_length={"test": 52.5}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/MindSmallReranking.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/MindSmallReranking.py new file mode 100644 index 
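# The reranking tasks in this part of the diff all use main_score="map". A toy
# sketch of average precision for a single query, given candidates already
# ranked by some hypothetical model (this illustrates the metric, it is not
# mteb's evaluator):
def average_precision(ranked_relevance: list[int]) -> float:
    """AP over one ranking; entries are 1 (relevant) or 0 (not relevant)."""
    hits, score = 0, 0.0
    for rank, rel in enumerate(ranked_relevance, start=1):
        if rel:
            hits += 1
            score += hits / rank
    return score / max(hits, 1)

assert average_precision([1, 0, 1, 0]) == (1 / 1 + 2 / 3) / 2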
0000000000000000000000000000000000000000..6c3005189a147167a63911f793e0b5cdad062c5f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/MindSmallReranking.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskReranking import AbsTaskReranking + + +class MindSmallReranking(AbsTaskReranking): + metadata = TaskMetadata( + name="MindSmallReranking", + description="Microsoft News Dataset: A Large-Scale English Dataset for News Recommendation Research", + reference="https://msnews.github.io/assets/doc/ACL2020_MIND.pdf", + hf_hub_name="mteb/mind_small", + dataset={ + "path": "mteb/mind_small", + "revision": "59042f120c80e8afa9cdbb224f67076cec0fc9a7", + }, + type="Reranking", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="map", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 107968}, + avg_character_length={"test": 70.9}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/SciDocsReranking.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/SciDocsReranking.py new file mode 100644 index 0000000000000000000000000000000000000000..b06e65e00d060355563935f802ca51f746e59afa --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/SciDocsReranking.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskReranking import AbsTaskReranking + + +class SciDocsReranking(AbsTaskReranking): + metadata = TaskMetadata( + name="SciDocsRR", + description="Ranking of related scientific papers based on their title.", + reference="https://allenai.org/data/scidocs", + dataset={ + "path": "mteb/scidocs-reranking", + "revision": "d3c5e1fc0b855ab6097bf1cda04dd73947d7caab", + }, + type="Reranking", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="map", + date=("2000-01-01", "2020-12-31"), # best guess + form=["written"], + domains=["Academic", "Non-fiction"], + task_subtypes=["Scientific Reranking"], + license="cc-by-4.0", + socioeconomic_status="high", + annotations_creators=None, + dialect=None, + text_creation="found", + bibtex_citation=""" +@inproceedings{cohan-etal-2020-specter, + title = "{SPECTER}: Document-level Representation Learning using Citation-informed Transformers", + author = "Cohan, Arman and + Feldman, Sergey and + Beltagy, Iz and + Downey, Doug and + Weld, Daniel", + editor = "Jurafsky, Dan and + Chai, Joyce and + Schluter, Natalie and + Tetreault, Joel", + booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", + month = jul, + year = "2020", + address = "Online", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2020.acl-main.207", + doi = "10.18653/v1/2020.acl-main.207", + pages = "2270--2282", + abstract = "Representation learning is a critical ingredient for natural language processing systems. Recent Transformer language models like BERT learn powerful textual representations, but these models are targeted towards token- and sentence-level training objectives and do not leverage information on inter-document relatedness, which limits their document-level representation power. 
For applications on scientific documents, such as classification and recommendation, accurate embeddings of documents are a necessity. We propose SPECTER, a new method to generate document-level embedding of scientific papers based on pretraining a Transformer language model on a powerful signal of document-level relatedness: the citation graph. Unlike existing pretrained language models, Specter can be easily applied to downstream applications without task-specific fine-tuning. Additionally, to encourage further research on document-level models, we introduce SciDocs, a new evaluation benchmark consisting of seven document-level tasks ranging from citation prediction, to document classification and recommendation. We show that Specter outperforms a variety of competitive baselines on the benchmark.", +} +""", + n_samples={"test": 19599}, + avg_character_length={"test": 69.0}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/StackOverflowDupQuestions.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/StackOverflowDupQuestions.py new file mode 100644 index 0000000000000000000000000000000000000000..e3f0670bc010190c701046a577b4b524d20198cf --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/StackOverflowDupQuestions.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskReranking import AbsTaskReranking + + +class StackOverflowDupQuestions(AbsTaskReranking): + metadata = TaskMetadata( + name="StackOverflowDupQuestions", + description="Stack Overflow Duplicate Questions Task for questions with the tags Java, JavaScript and Python", + reference="https://www.microsoft.com/en-us/research/uploads/prod/2019/03/nl4se18LinkSO.pdf", + dataset={ + "path": "mteb/stackoverflowdupquestions-reranking", + "revision": "e185fbe320c72810689fc5848eb6114e1ef5ec69", + }, + type="Reranking", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="map", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples={"test": 3467}, + avg_character_length={"test": 49.8}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/eng/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/fra/AlloprofReranking.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/fra/AlloprofReranking.py new file mode 100644 index 0000000000000000000000000000000000000000..208ddb70ec8c047ef2b00c6e4093f4dd12d17613 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/fra/AlloprofReranking.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskReranking import AbsTaskReranking + + +class AlloprofReranking(AbsTaskReranking): + metadata = TaskMetadata( + name="AlloprofReranking", + description="This dataset was provided by AlloProf, an organisation in Quebec, Canada offering resources and a help forum curated by a large number of teachers to students on all subjects taught in primary and secondary school", +
reference="https://huggingface.co/datasets/antoinelb7/alloprof", + dataset={ + "path": "lyon-nlp/mteb-fr-reranking-alloprof-s2p", + "revision": "65393d0d7a08a10b4e348135e824f385d420b0fd", + }, + type="Reranking", + category="s2p", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="map", + date=("2020-01-01", "2023-04-14"), # supposition + form=["written"], + domains=["Web", "Academic"], + task_subtypes=None, + license="CC BY-NC-SA 4.0", + socioeconomic_status=None, + annotations_creators="expert-annotated", + dialect=None, + text_creation="found", + bibtex_citation="""@misc{lef23, + doi = {10.48550/ARXIV.2302.07738}, + url = {https://arxiv.org/abs/2302.07738}, + author = {Lefebvre-Brossard, Antoine and Gazaille, Stephane and Desmarais, Michel C.}, + keywords = {Computation and Language (cs.CL), Information Retrieval (cs.IR), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Alloprof: a new French question-answer education dataset and its use in an information retrieval case study}, + publisher = {arXiv}, + year = {2023}, + copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International} + }""", + n_samples={"test": 2316, "train": 9264}, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.dataset = datasets.load_dataset( + name="queries", + **self.metadata_dict["dataset"], + split=self.metadata.eval_splits[0], + ) + documents = datasets.load_dataset( + name="documents", **self.metadata_dict["dataset"], split="test" + ) + # replace documents ids in positive and negative column by their respective texts + doc_id2txt = dict(list(zip(documents["doc_id"], documents["text"]))) + + self.dataset = self.dataset.map( + lambda x: { + "positive": [doc_id2txt[docid] for docid in x["positive"]], + "negative": [doc_id2txt[docid] for docid in x["negative"]], + } + ) + self.dataset = datasets.DatasetDict({"test": self.dataset}) + + self.dataset_transform() + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/fra/SyntecReranking.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/fra/SyntecReranking.py new file mode 100644 index 0000000000000000000000000000000000000000..faf7e0d640dcf1255a400700fdbd222cdba48e4f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/fra/SyntecReranking.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskReranking import AbsTaskReranking + + +class SyntecReranking(AbsTaskReranking): + metadata = TaskMetadata( + name="SyntecReranking", + description="This dataset has been built from the Syntec Collective bargaining agreement.", + reference="https://huggingface.co/datasets/lyon-nlp/mteb-fr-reranking-syntec-s2p", + dataset={ + "path": "lyon-nlp/mteb-fr-reranking-syntec-s2p", + "revision": "daf0863838cd9e3ba50544cdce3ac2b338a1b0ad", + }, + type="Reranking", + category="s2p", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="map", + date=("2022-12-01", "2022-12-02"), + form=["written"], + domains=["Legal"], + task_subtypes=None, + license="CC BY-NC-SA 4.0", + socioeconomic_status=None, + annotations_creators="human-annotated", + dialect=None, + text_creation="found", + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.dataset = 
datasets.load_dataset( + name="queries", + **self.metadata_dict["dataset"], + split=self.metadata.eval_splits[0], + ) + documents = datasets.load_dataset( + name="documents", **self.metadata_dict["dataset"], split="test" + ) + # replace documents ids in positive and negative column by their respective texts + doc_id2txt = dict(list(zip(documents["doc_id"], documents["text"]))) + + self.dataset = self.dataset.map( + lambda x: { + "positive": [doc_id2txt[docid] for docid in x["positive"]], + "negative": [doc_id2txt[docid] for docid in x["negative"]], + } + ) + self.dataset = datasets.DatasetDict({"test": self.dataset}) + + self.dataset_transform() + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/fra/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/fra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/multilingual/MIRACLReranking.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/multilingual/MIRACLReranking.py new file mode 100644 index 0000000000000000000000000000000000000000..a1cba53a8cba566736d45b291ae8e61df20b6faa --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/multilingual/MIRACLReranking.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskReranking import AbsTaskReranking + + +class MIRACLReranking(MultilingualTask, AbsTaskReranking): + metadata = TaskMetadata( + name="MIRACLReranking", + description="MIRACL (Multilingual Information Retrieval Across a Continuum of Languages) is a multilingual retrieval dataset that focuses on search across 18 different languages. 
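# AlloprofReranking and SyntecReranking above load two configs ("queries" and
# "documents") and join them through a doc_id -> text lookup. The same join on
# toy data:
from datasets import Dataset

queries = Dataset.from_dict(
    {"query": ["q1"], "positive": [["d1"]], "negative": [["d2"]]}
)
documents = Dataset.from_dict(
    {"doc_id": ["d1", "d2"], "text": ["first document", "second document"]}
)
doc_id2txt = dict(zip(documents["doc_id"], documents["text"]))
queries = queries.map(
    lambda x: {
        "positive": [doc_id2txt[i] for i in x["positive"]],
        "negative": [doc_id2txt[i] for i in x["negative"]],
    }
)
assert queries[0]["positive"] == ["first document"]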
This task focuses on the German and Spanish subset.", + reference="https://project-miracl.github.io/", + dataset={ + "path": "jinaai/miracl", + "revision": "d28a029f35c4ff7f616df47b0edf54e6882395e6", + }, + type="Reranking", + category="s2s", + eval_splits=["test"], + eval_langs={ + "de": ["deu-Latn"], + "es": ["spa-Latn"], + }, + main_score="map", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/multilingual/WikipediaRerankingMultilingual.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/multilingual/WikipediaRerankingMultilingual.py new file mode 100644 index 0000000000000000000000000000000000000000..2cdcd8f7fdb24f21fdb44218b4547f6420a85ef3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/multilingual/WikipediaRerankingMultilingual.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskReranking import AbsTaskReranking + +_EVAL_LANGS = { + "bg": ["bul-Cyrl"], + "bn": ["ben-Beng"], + "cs": ["ces-Latn"], + "da": ["dan-Latn"], + "de": ["deu-Latn"], + "en": ["eng-Latn"], + "fa": ["fas-Arab"], + "fi": ["fin-Latn"], + "hi": ["hin-Deva"], + "it": ["ita-Latn"], + "nl": ["nld-Latn"], + "pt": ["por-Latn"], + "ro": ["ron-Latn"], + "sr": ["srp-Cyrl"], + "no": ["nor-Latn"], + "sv": ["swe-Latn"], +} + + +class WikipediaRerankingMultilingual(MultilingualTask, AbsTaskReranking): + metadata = TaskMetadata( + name="WikipediaRerankingMultilingual", + description="The dataset is derived from Cohere's wikipedia-2023-11 dataset and contains synthetically generated queries.", + reference="https://huggingface.co/datasets/ellamind/wikipedia-2023-11-reranking-multilingual", + hf_hub_name="ellamind/wikipedia-2023-11-reranking-multilingual", + dataset={ + "path": "ellamind/wikipedia-2023-11-reranking-multilingual", + "revision": "6268b37d6f975f2a134791ba2f250a91d0bdfb4f", + }, + type="Reranking", + category="s2p", + eval_splits=["test"], + eval_langs=_EVAL_LANGS, + main_score="map", + date=("2023-11-01", "2024-05-15"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=[], + license="cc-by-sa-3.0", + socioeconomic_status="mixed", + annotations_creators="LM-generated", + dialect=[], + text_creation="LM-generated and verified", + bibtex_citation="", + n_samples={ + "en": 1500, + "de": 1500, + "it": 1500, + "pt": 1500, + "nl": 1500, + "cs": 1500, + "ro": 1500, + "bg": 1500, + "sr": 1500, + "fi": 1500, + "da": 1500, + "fa": 1500, + "hi": 1500, + "bn": 1500, + "no": 1500, + "sv": 1500, + }, + avg_character_length={"test": 452}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/multilingual/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/multilingual/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/zho/CMTEBReranking.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/zho/CMTEBReranking.py new file mode 100644 index 0000000000000000000000000000000000000000..db9fd521671c3a0cbf929dd202e447cac99cdb6d --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/zho/CMTEBReranking.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskReranking import AbsTaskReranking +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class T2Reranking(AbsTaskReranking): + metadata = TaskMetadata( + name="T2Reranking", + description="T2Ranking: A large-scale Chinese Benchmark for Passage Ranking", + reference="https://arxiv.org/abs/2304.03679", + dataset={ + "path": "C-MTEB/T2Reranking", + "revision": "76631901a18387f85eaa53e5450019b87ad58ef9", + }, + type="Reranking", + category="s2s", + eval_splits=["dev"], + eval_langs=["cmn-Hans"], + main_score="map", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + +class MMarcoReranking(AbsTaskReranking): + metadata = TaskMetadata( + name="MMarcoReranking", + description="mMARCO is a multilingual version of the MS MARCO passage ranking dataset", + reference="https://github.com/unicamp-dl/mMARCO", + dataset={ + "path": "C-MTEB/Mmarco-reranking", + "revision": "8e0c766dbe9e16e1d221116a3f36795fbade07f6", + }, + type="Reranking", + category="s2s", + eval_splits=["dev"], + eval_langs=["cmn-Hans"], + main_score="map", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + +class CMedQAv1(AbsTaskReranking): + metadata = TaskMetadata( + name="CMedQAv1-reranking", + description="Chinese community medical question answering", + reference="https://github.com/zhangsheng93/cMedQA", + dataset={ + "path": "C-MTEB/CMedQAv1-reranking", + "revision": "8d7f1e942507dac42dc58017c1a001c3717da7df", + }, + type="Reranking", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="map", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + +class CMedQAv2(AbsTaskReranking): + metadata = TaskMetadata( + name="CMedQAv2-reranking", + description="Chinese community medical question answering", + reference="https://github.com/zhangsheng93/cMedQA2", + dataset={ + "path": "C-MTEB/CMedQAv2-reranking", + "revision": "23d186750531a14a0357ca22cd92d712fd512ea0", + }, + type="Reranking", + category="s2s", + eval_splits=["test"], + eval_langs=["cmn-Hans"], + main_score="map", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/zho/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Reranking/zho/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ara/SadeemQuestionRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ara/SadeemQuestionRetrieval.py new file mode 100644 index 
0000000000000000000000000000000000000000..a665c7d2dd392c4cc8841a9b211c6008ead46e4d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ara/SadeemQuestionRetrieval.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class SadeemQuestionRetrieval(AbsTaskRetrieval): + _EVAL_SPLIT = "test" + + metadata = TaskMetadata( + name="SadeemQuestionRetrieval", + dataset={ + "path": "sadeem-ai/sadeem-ar-eval-retrieval-questions", + "revision": "3cb0752b182e5d5d740df547748b06663c8e0bd9", + "name": "test", + }, + reference="https://huggingface.co/datasets/sadeem-ai/sadeem-ar-eval-retrieval-questions", + description="SadeemQuestion: A Benchmark Data Set for Community Question-Retrieval Research", + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["ara-Arab"], + main_score="ndcg_at_10", + date=("2024-01-01", "2024-04-01"), + form=["written"], + domains=["written"], + task_subtypes=["Article retrieval"], + license="Not specified", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{sadeem-2024-ar-retrieval-questions, + title = "SadeemQuestionRetrieval: A New Benchmark for Arabic questions-based Articles Searching.", + author = "abubakr.soliman@sadeem.app" + } + """, + n_samples={_EVAL_SPLIT: 22979}, + avg_character_length={_EVAL_SPLIT: 500.0}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + # load once: "queries", "corpus" and "qrels" are splits of the same dataset + data = datasets.load_dataset(**self.metadata_dict["dataset"]) + + queries = {row["query-id"]: row["text"] for row in data["queries"]} + corpus = {row["corpus-id"]: {"text": row["text"]} for row in data["corpus"]} + qrels = {row["query-id"]: {row["corpus-id"]: 1} for row in data["qrels"]} + + self.corpus = {self._EVAL_SPLIT: corpus} + self.queries = {self._EVAL_SPLIT: queries} + self.relevant_docs = {self._EVAL_SPLIT: qrels} + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ara/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ara/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/CodeEditSearchRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/CodeEditSearchRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..bc52425d513a06ea33d8720cff80d9bcad66b4c6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/CodeEditSearchRetrieval.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks import MultilingualTask +from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval +from mteb.abstasks.TaskMetadata import TaskMetadata + +_LANGS = [ + "python", + "javascript", + "typescript", + "go", + "ruby", + "java", + "php", + "c", + "c++", + "rust", + "swift", + "scala", + "shell", +] + + +class CodeEditSearchRetrieval(MultilingualTask, AbsTaskRetrieval): + _EVAL_SPLIT = "train" + metadata = TaskMetadata( + name="CodeEditSearchRetrieval", + description="The dataset is a collection of unified diffs of code changes, paired with a 
short instruction that describes the change. The dataset is derived from the CommitPackFT dataset.", + reference="https://huggingface.co/datasets/cassanof/CodeEditSearch/viewer", + dataset={ + "path": "cassanof/CodeEditSearch", + "revision": "4e51c66e0939303f6928472f13ad0848b2a3f4c0", + }, + type="Retrieval", + category="p2p", + eval_splits=[_EVAL_SPLIT], + eval_langs={lang: [lang + "-Code"] for lang in _LANGS}, + main_score="ndcg_at_10", + date=("2011-02-12", "2016-01-01"), + form=["written"], + domains=["Programming"], + task_subtypes=["Code retrieval"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="@article{muennighoff2023octopack, title={OctoPack: Instruction Tuning Code Large Language Models}, author={Niklas Muennighoff and Qian Liu and Armel Zebaze and Qinkai Zheng and Binyuan Hui and Terry Yue Zhuo and Swayam Singh and Xiangru Tang and Leandro von Werra and Shayne Longpre}, journal={arXiv preprint arXiv:2308.07124}, year={2023} }", + n_samples={ + _EVAL_SPLIT: 1000 * len(_LANGS), + }, + avg_character_length={"train": 553.50}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + lang_subs = {lang: [] for lang in _LANGS} + for lang in _LANGS: + data = datasets.load_dataset( + split=self._EVAL_SPLIT, + data_dir=lang, + **self.metadata_dict["dataset"], + ) + for row in data: + lang_subs[lang].append(row) + + self.queries = {} + self.corpus = {} + self.relevant_docs = {} + + for lang, sub in lang_subs.items(): + # cap each language at its share of n_samples; integer division, since slice bounds must be ints + sub = sub[ + : min( + len(sub), + self.metadata_dict["n_samples"][self._EVAL_SPLIT] // len(_LANGS), + ) + ] + + self.queries[lang] = { + self._EVAL_SPLIT: { + str(i): row["instruction"] for i, row in enumerate(sub) + } + } + self.corpus[lang] = { + self._EVAL_SPLIT: { + str(row["commit"]): {"text": row["diff"]} for row in sub + } + } + self.relevant_docs[lang] = { + self._EVAL_SPLIT: { + str(i): {row["commit"]: 1} for i, row in enumerate(sub) + } + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/CodeSearchNetRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/CodeSearchNetRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..ab74208c0de30f77a0f72f19539f1e1324d7fd99 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/CodeSearchNetRetrieval.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks import MultilingualTask +from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval +from mteb.abstasks.TaskMetadata import TaskMetadata + +_LANGS = ["python", "javascript", "go", "ruby", "java", "php"] + + +class CodeSearchNetRetrieval(MultilingualTask, AbsTaskRetrieval): + _EVAL_SPLIT = "test" + metadata = TaskMetadata( + name="CodeSearchNetRetrieval", + description="The dataset is a collection of code snippets and their corresponding natural language queries. 
The task is to retrieve the most relevant code snippet for a given query.", + reference="https://huggingface.co/datasets/code_search_net/viewer", + dataset={ + "path": "code_search_net", + "revision": "fdc6a9e39575768c27eb8a2a5f702bf846eb4759", + }, + type="Retrieval", + category="p2p", + eval_splits=[_EVAL_SPLIT], + eval_langs={lang: [lang + "-Code"] for lang in _LANGS}, + main_score="ndcg_at_10", + date=("2019-01-01", "2019-12-31"), + form=["written"], + domains=["Programming"], + task_subtypes=["Code retrieval"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="@article{husain2019codesearchnet, title={{CodeSearchNet} challenge: Evaluating the state of semantic code search}, author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, journal={arXiv preprint arXiv:1909.09436}, year={2019} }", + n_samples={ + _EVAL_SPLIT: 1000, + }, + avg_character_length={"test": 1196.4609}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + data = datasets.load_dataset( + split=self._EVAL_SPLIT, + trust_remote_code=True, + streaming=True, + **self.metadata_dict["dataset"], + ) + data = data.shuffle(seed=42) + + # remove any leaked labels. quite common in this dataset + data = data.map( + lambda ex: { + "func_code_string": ex["func_code_string"].replace( + ex["func_documentation_string"], "" + ) + } + ) + + lang_subs = {lang: [] for lang in _LANGS} + for ex in data: + lang_subs[ex["language"]].append(ex) + + self.queries = {} + self.corpus = {} + self.relevant_docs = {} + + for lang, sub in lang_subs.items(): + sub = sub[ + : min(len(sub), self.metadata_dict["n_samples"][self._EVAL_SPLIT]) + ] + + self.queries[lang] = { + self._EVAL_SPLIT: { + str(i): row["func_documentation_string"] + for i, row in enumerate(sub) + } + } + self.corpus[lang] = { + self._EVAL_SPLIT: { + str(row["func_code_url"]): {"text": row["func_code_string"]} + for row in sub + } + } + self.relevant_docs[lang] = { + self._EVAL_SPLIT: { + str(i): {row["func_code_url"]: 1} for i, row in enumerate(sub) + } + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/code/__init__.py @@ -0,0 +1 @@ + diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/dan_fever.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/dan_fever.py new file mode 100644 index 0000000000000000000000000000000000000000..35f0ba9d0ae40f24bd2fa346ded275c3417beb95 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/dan_fever.py @@ -0,0 +1,92 @@ +from mteb.abstasks import AbsTaskRetrieval, TaskMetadata + + +class DanFever(AbsTaskRetrieval): + metadata = TaskMetadata( + name="DanFEVER", + dataset={ + "path": "strombergnlp/danfever", + "revision": "5d01e3f6a661d48e127ab5d7e3aaa0dc8331438a", + }, + description="A Danish dataset intended for misinformation research. 
It follows the same format as the English FEVER dataset.", + reference="https://aclanthology.org/2021.nodalida-main.47/", + type="Retrieval", + category="p2p", + eval_splits=["train"], + eval_langs=["dan-Latn"], + main_score="ndcg_at_10", + date=("2020-01-01", "2021-12-31"), # best guess + form=["written"], + domains=["Encyclopaedic", "Non-fiction"], + license="CC BY-SA 4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" +@inproceedings{norregaard-derczynski-2021-danfever, + title = "{D}an{FEVER}: claim verification dataset for {D}anish", + author = "N{\o}rregaard, Jeppe and + Derczynski, Leon", + editor = "Dobnik, Simon and + {\O}vrelid, Lilja", + booktitle = "Proceedings of the 23rd Nordic Conference on Computational Linguistics (NoDaLiDa)", + month = may # " 31--2 " # jun, + year = "2021", + address = "Reykjavik, Iceland (Online)", + publisher = {Link{\"o}ping University Electronic Press, Sweden}, + url = "https://aclanthology.org/2021.nodalida-main.47", + pages = "422--428", + abstract = "We present a dataset, DanFEVER, intended for multilingual misinformation research. The dataset is in Danish and has the same format as the well-known English FEVER dataset. It can be used for testing methods in multilingual settings, as well as for creating models in production for the Danish language.", +} +""", + n_samples={"train": 8897}, + avg_character_length={"train": 124.84}, + task_subtypes=["Claim verification"], + ) + + def dataset_transform(self) -> None: + """Transform the dataset into a retrieval dataset with the following attributes + + self.corpus = Dict[doc_id, Dict[str, str]] # id => dict with document data like title and text + self.queries = Dict[query_id, str] # id => query + self.relevant_docs = Dict[query_id, Dict[doc_id, score]] + """ + self.corpus = {} + self.relevant_docs = {} + self.queries = {} + text2id = {} + + for split in self.dataset: + self.corpus[split] = {} + self.relevant_docs[split] = {} + self.queries[split] = {} + + ds = self.dataset[split] + claims = ds["claim"] + evidences = ds["evidence_extract"] + labels = ds["label"] + class_labels = ds.features["label"].names + + for claim, evidence, label_id in zip(claims, evidences, labels): + claim_is_supported = class_labels[label_id] == "Supported" + + sim = ( + 1 if claim_is_supported else 0 + ) # evidence for refuted claims is kept with score 0, i.e. judged non-relevant 
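+ # Illustrative shapes of the structures built below (example values, not from the data): + # self.queries[split] == {"0": "<claim>"} + # self.corpus[split] == {"1": {"title": "", "text": "<evidence>"}} + # self.relevant_docs[split] == {"0": {"1": 1}} for a supported claim, {"0": {"1": 0}} for a refuted one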
if claim not in text2id: + text2id[claim] = str(len(text2id)) + if evidence not in text2id: + text2id[evidence] = str(len(text2id)) + + claim_id = text2id[claim] + evidence_id = text2id[evidence] + + self.queries[split][claim_id] = claim + self.corpus[split][evidence_id] = {"title": "", "text": evidence} + + if claim_id not in self.relevant_docs[split]: + self.relevant_docs[split][claim_id] = {} + + self.relevant_docs[split][claim_id][evidence_id] = sim diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/t2nord_retrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/t2nord_retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..58d60ce05f0f53876f15e542fd3787aa31c90132 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/t2nord_retrieval.py @@ -0,0 +1,71 @@ +import datasets + +from mteb.abstasks import AbsTaskRetrieval, TaskMetadata + + +class TV2Nordretrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="TV2Nordretrieval", + dataset={ + "path": "alexandrainst/nordjylland-news-summarization", + "revision": "80cdb115ec2ef46d4e926b252f2b59af62d6c070", + }, + description="News articles and their corresponding summaries, extracted from the Danish newspaper TV2 Nord.", + reference="https://huggingface.co/datasets/alexandrainst/nordjylland-news-summarization", + type="Retrieval", + category="p2p", + eval_splits=["test"], + eval_langs=["dan-Latn"], + main_score="ndcg_at_10", + date=("2020-01-01", "2024-12-31"), # best guess + form=["written"], + domains=["News", "Non-fiction"], + license="CC0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=None, + n_samples={"test": 4096}, + avg_character_length={"test": 784.11}, + task_subtypes=["Article retrieval"], + ) + + def dataset_transform(self) -> None: + """Transform the dataset into a retrieval dataset with the following attributes + + self.corpus = Dict[doc_id, Dict[str, str]] # id => dict with document data like title and text + self.queries = Dict[query_id, str] # id => query + self.relevant_docs = Dict[query_id, Dict[doc_id, score]] + """ + self.corpus = {} + self.relevant_docs = {} + self.queries = {} + text2id = {} + + for split in self.dataset: + ds: datasets.Dataset = self.dataset[split] # type: ignore + ds = ds.shuffle(seed=42) + ds = ds.select( + range(2048) + ) # limit the dataset size to make sure the task does not take too long to run + self.queries[split] = {} + self.relevant_docs[split] = {} + self.corpus[split] = {} + + summary = ds["summary"] + article = ds["text"] + + n = 0 + for summ, art in zip(summary, article): + self.queries[split][str(n)] = summ + q_n = n + n += 1 + if art not in text2id: + text2id[art] = n + self.corpus[split][str(n)] = {"title": "", "text": art} + n += 1 + + self.relevant_docs[split][str(q_n)] = { + str(text2id[art]): 1 + } # only one correct match per query diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/twitterhjerne.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/twitterhjerne.py new file mode 100644 index 0000000000000000000000000000000000000000..b30c6bde3f038533d99e17725def724fce6dd382 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/dan/twitterhjerne.py @@ -0,0 +1,92 @@ +import datasets + +from mteb.abstasks import AbsTaskRetrieval, TaskMetadata + + +class TwitterHjerneRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="TwitterHjerneRetrieval", + dataset={ 
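+ # these keys are passed straight to datasets.load_dataset; the revision pins an exact dataset snapshot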
"path": "sorenmulli/da-hashtag-twitterhjerne", + "revision": "099ee143c7fdfa6bd7965be8c801cb161c313b29", + }, + description="Danish question asked on Twitter with the Hashtag #Twitterhjerne ('Twitter brain') and their corresponding answer.", + reference="https://huggingface.co/datasets/sorenmulli/da-hashtag-twitterhjerne", + type="Retrieval", + category="p2p", + eval_splits=["train"], + eval_langs=["dan-Latn"], + main_score="ndcg_at_10", + date=("2006-01-01", "2024-12-31"), # best guess + form=["written"], + domains=["Social"], + license=None, + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" +@article{holm2024gllms, + title={Are GLLMs Danoliterate? Benchmarking Generative NLP in Danish}, + author={Holm, S{\o}ren Vejlgaard}, + year={2024} +} +""", + n_samples={"train": 340}, + avg_character_length={"train": 138.23}, + task_subtypes=["Question answering"], + ) + + def dataset_transform(self) -> None: + """And transform to a retrieval datset, which have the following attributes + + self.corpus = Dict[doc_id, Dict[str, str]] #id => dict with document datas like title and text + self.queries = Dict[query_id, str] #id => query + self.relevant_docs = Dict[query_id, Dict[[doc_id, score]] + """ + self.corpus = {} + self.relevant_docs = {} + self.queries = {} + text2id = {} + + for split in self.dataset: + ds: datasets.Dataset = self.dataset[split] # type: ignore + ds = ds.map(answers_to_list) + + self.queries[split] = {} + self.relevant_docs[split] = {} + self.corpus[split] = {} + + questions = ds["Question"] + answers = ds["answers"] + + n = 0 + for q, answ in zip(questions, answers): + if len(q.split(" ")) < 4 and answ: + continue + query_id = str(n) + self.queries[split][query_id] = q + n += 1 + answer_ids = [] + for a in answ: + if a not in text2id: + text2id[a] = n + answer_id = str(n) + self.corpus[split][answer_id] = {"title": "", "text": a} + n += 1 + else: + answer_id = str(text2id[a]) + answer_ids.append(answer_id) + + self.relevant_docs[split][query_id] = { + answer_id: 1 for answer_id in answer_ids + } + + +def answers_to_list(example: dict) -> dict: + example["answers"] = [ + v + for k, v in example.items() + if k.startswith("Answer") and v and len(v.split(" ")) > 3 + ] + return example diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GerDaLIRRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GerDaLIRRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..b39d5ccc34af44cb8e99db0140012fd855eeb435 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GerDaLIRRetrieval.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class GerDaLIR(AbsTaskRetrieval): + _EVAL_SPLIT = "test" + + metadata = TaskMetadata( + name="GerDaLIR", + description="GerDaLIR is a legal information retrieval dataset created from the Open Legal Data platform.", + reference="https://github.com/lavis-nlp/GerDaLIR", + dataset={ + "path": "jinaai/ger_da_lir", + "revision": "0bb47f1d73827e96964edb84dfe552f62f4fd5eb", + }, + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["deu-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + 
text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + query_rows = datasets.load_dataset( + name="queries", + split=self._EVAL_SPLIT, + **self.metadata_dict["dataset"], + ) + corpus_rows = datasets.load_dataset( + name="corpus", + split=self._EVAL_SPLIT, + **self.metadata_dict["dataset"], + ) + qrels_rows = datasets.load_dataset( + name="qrels", + split=self._EVAL_SPLIT, + **self.metadata_dict["dataset"], + ) + + self.queries = { + self._EVAL_SPLIT: {row["_id"]: row["text"] for row in query_rows} + } + self.corpus = {self._EVAL_SPLIT: {row["_id"]: row for row in corpus_rows}} + # each qrels row lists the ids of its relevant documents space-separated in the "text" field + self.relevant_docs = { + self._EVAL_SPLIT: { + row["_id"]: {v: 1 for v in row["text"].split(" ")} for row in qrels_rows + } + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GerDaLIRSmallRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GerDaLIRSmallRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..edac8e5ed898b5d9c125fea582e9c8762d8410f9 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GerDaLIRSmallRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class GerDaLIRSmall(AbsTaskRetrieval): + metadata = TaskMetadata( + name="GerDaLIRSmall", + description="The dataset consists of documents, passages and relevance labels in German. In contrast to the original dataset, only documents that have corresponding queries in the query set are chosen to create a smaller corpus for evaluation purposes.", + reference="https://github.com/lavis-nlp/GerDaLIR", + dataset={ + "path": "mteb/GerDaLIRSmall", + "revision": "48327de6ee192e9610f3069789719788957c7abd", + }, + type="Retrieval", + category="p2p", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="ndcg_at_10", + date=None, + form=["written"], + domains=["Legal"], + task_subtypes=["Article retrieval"], + license="MIT license", + socioeconomic_status="high", + annotations_creators="derived", + dialect=None, + text_creation="found", + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanDPRRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanDPRRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..6361aa9664905274c9b9da972d01204b6a72cf6a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanDPRRetrieval.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class GermanDPR(AbsTaskRetrieval): + _EVAL_SPLIT = "test" + + metadata = TaskMetadata( + name="GermanDPR", + description="GermanDPR is a German Question Answering dataset for open-domain QA. 
It associates questions with a textual context containing the answer", + reference="https://huggingface.co/datasets/deepset/germandpr", + dataset={ + "path": "deepset/germandpr", + "revision": "5129d02422a66be600ac89cd3e8531b4f97d347d", + }, + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["deu-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + @staticmethod + def _format_documents(docs, id_prefix="", existing_docs=None): + if existing_docs is None: + existing_docs = dict() + result = {} + for i, (title, content) in enumerate(zip(docs["title"], docs["text"])): + formatted_content = content.split("==\n")[-1].replace("\n", " ").lstrip() + if formatted_content in existing_docs: + id_value = existing_docs[formatted_content] + else: + id_value = f"{id_prefix}{i}" + existing_docs[formatted_content] = id_value + result[id_value] = {"title": title, "text": formatted_content} + return result + + def load_data(self, **kwargs): + if self.data_loaded: + return + + data = datasets.load_dataset( + split=self._EVAL_SPLIT, + **self.metadata_dict["dataset"], + ) + corpus = dict() + queries = dict() + relevant_docs = dict() + all_docs = dict() + for i, row in enumerate(data): + q_id = f"q_{i}" + queries[q_id] = row["question"] + pos_docs = self._format_documents( + row["positive_ctxs"], id_prefix=f"doc_{i}_p_", existing_docs=all_docs + ) + corpus.update(pos_docs) + neg_docs = self._format_documents( + row["hard_negative_ctxs"], + id_prefix=f"doc_{i}_n_", + existing_docs=all_docs, + ) + corpus.update(neg_docs) + relevant_docs[q_id] = {k: 1 for k in pos_docs} + self.queries = {self._EVAL_SPLIT: queries} + self.corpus = {self._EVAL_SPLIT: corpus} + self.relevant_docs = {self._EVAL_SPLIT: relevant_docs} + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanGovServiceRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanGovServiceRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d083e3ce05766c68402487f3fdbcecc4210f0d6c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanGovServiceRetrieval.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import hashlib + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_SPLIT = "test" + + +class GermanGovServiceRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="GermanGovServiceRetrieval", + description="LHM-Dienstleistungen-QA is a German question answering dataset for government services of the Munich city administration. 
It associates questions with a textual context containing the answer", + reference="https://huggingface.co/datasets/it-at-m/LHM-Dienstleistungen-QA", + dataset={ + "path": "it-at-m/LHM-Dienstleistungen-QA", + "revision": "ed40131b56ce86ce3666f2942953595dd9d29608", + }, + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["deu-Latn"], + main_score="ndcg_at_5", + date=("2022-11-01", "2022-11-30"), + form=["written"], + domains=["Government"], + task_subtypes=["Question answering"], + license="mit", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + bibtex_citation="""@software{lhm-dienstleistungen-qa, + author = {Schröder, Leon Marius and + Gutknecht, Clemens and + Alkiddeh, Oubada and + Susanne Weiß, + Lukas, Leon}, + title = {LHM-Dienstleistungen-QA - german public domain question-answering dataset}, + month = nov, + year = 2022, + publisher = {it@M}, + url = {https://huggingface.co/datasets/it-at-m/LHM-Dienstleistungen-QA} +}""", + text_creation="found", + n_samples={"test": 357}, + avg_character_length={"test": 1211.69}, + ) + + @staticmethod + def get_hash(input_str) -> str: + return hashlib.md5(input_str.encode("utf-8")).hexdigest() + + def load_data(self, **kwargs): + if self.data_loaded: + return + + dataset = datasets.load_dataset( + path=self.metadata_dict["dataset"]["path"], + split=_EVAL_SPLIT, + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + corpus = {} + queries = {} + relevant_docs = {} + + for row in dataset: # row: title, context, question, ... + # use hash values as IDs + d_id = "d_" + self.get_hash(row["title"] + row["context"]) + q_id = "q_" + self.get_hash(row["question"]) + + corpus[d_id] = { + "_id": d_id, + "title": row["title"], + "text": row["context"], + } + queries[q_id] = row["question"] + + if q_id not in relevant_docs: + relevant_docs[q_id] = {} + + relevant_docs[q_id][d_id] = 1 # 1 = relevant + + self.queries = {_EVAL_SPLIT: queries} + self.corpus = {_EVAL_SPLIT: corpus} + self.relevant_docs = {_EVAL_SPLIT: relevant_docs} + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanQuADRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanQuADRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..9cb3d0ff05670847ae291fcec22a8b9dc789e824 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/GermanQuADRetrieval.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from collections import defaultdict + +from datasets import DatasetDict, load_dataset + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +def load_retrieval_data(dataset_path, eval_splits): + eval_split = eval_splits[0] + corpus_dataset = load_dataset(dataset_path, "corpus") + queries_dataset = load_dataset(dataset_path, "queries") + qrels = load_dataset(dataset_path + "-qrels")[eval_split] + + corpus = {e["_id"]: {"text": e["text"]} for e in corpus_dataset["corpus"]} + queries = {e["_id"]: e["text"] for e in queries_dataset["queries"]} + relevant_docs = defaultdict(dict) + for e in qrels: + relevant_docs[e["query-id"]][e["corpus-id"]] = e["score"] + + corpus = DatasetDict({eval_split: corpus}) + queries = DatasetDict({eval_split: queries}) + relevant_docs = DatasetDict({eval_split: relevant_docs}) + return corpus, queries, relevant_docs + + +class GermanQuADRetrieval(AbsTaskRetrieval): + 
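+ # A sketch of what load_retrieval_data above returns (illustrative ids, not from the data): + # corpus == {"test": {"d1": {"text": "..."}}} + # queries == {"test": {"q1": "..."}} + # relevant_docs == {"test": {"q1": {"d1": 1}}}, with scores taken from the qrels split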
metadata = TaskMetadata( + name="GermanQuAD-Retrieval", + description="Context Retrieval for German Question Answering", + reference="https://www.kaggle.com/datasets/GermanQuAD", + dataset={ + "path": "mteb/germanquad-retrieval", + "revision": "f5c87ae5a2e7a5106606314eef45255f03151bb3", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="mrr_at_5", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = load_retrieval_data( + self.metadata_dict["dataset"]["path"], self.metadata_dict["eval_splits"] + ) + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/LegalQuADRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/LegalQuADRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d6c201204414e43596efd0199d2202366b0d1e15 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/LegalQuADRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LegalQuAD(AbsTaskRetrieval): + metadata = TaskMetadata( + name="LegalQuAD", + description="The dataset consists of questions and legal documents in German.", + reference="https://github.com/Christoph911/AIKE2021_Appendix", + dataset={ + "path": "mteb/LegalQuAD", + "revision": "37aa6cfb01d48960b0f8e3f17d6e3d99bf1ebc3e", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["deu-Latn"], + main_score="ndcg_at_10", + date=None, + form=["written"], + domains=["Legal"], + task_subtypes=["Question answering"], + license="CC BY 4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=None, + text_creation="found", + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/deu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ell/GreekCivicsQA.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ell/GreekCivicsQA.py new file mode 100644 index 0000000000000000000000000000000000000000..a8271ab44a79d49a7506cbcaaca8a3129b119048 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ell/GreekCivicsQA.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +from hashlib import sha256 + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class GreekCivicsQA(AbsTaskRetrieval): + metadata = TaskMetadata( + name="GreekCivicsQA", + description="A Greek question answering dataset pairing civics questions with the curriculum passages that answer them.", + reference="https://huggingface.co/datasets/ilsp/greek_civics_qa", + dataset={ + "path": "ilsp/greek_civics_qa", + "revision": 
"a04523a3c83153be07a8945bb1fb351cbbcef90b", + }, + type="Retrieval", + category="s2p", + eval_splits=["default"], + eval_langs=["ell-Grek"], + main_score="ndcg_at_10", + date=("2023-01-01", "2024-04-01"), + form=["written"], + domains=["Academic"], + task_subtypes=["Question answering"], + license="cc-by-nc-sa-4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="", + n_samples={"default": 407}, + avg_character_length={"default": 2226.85}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + # fetch both subsets of the dataset + eval_split = self.metadata_dict["eval_splits"][0] + data_raw = datasets.load_dataset(**self.metadata_dict["dataset"])[eval_split] + + queries = {eval_split: {}} + corpus = {eval_split: {}} + relevant_docs = {eval_split: {}} + + question_ids = { + question: str(id) + for id, question in zip(data_raw["id"], data_raw["question"]) + } + + context_ids = { + answer: sha256(answer.encode("utf-8")).hexdigest() + for answer in set(data_raw["answer"]) + } + + for row in data_raw: + question = row["question"] + context = row["answer"] + query_id = question_ids[question] + queries[eval_split][query_id] = question + + doc_id = context_ids[context] + corpus[eval_split][doc_id] = {"text": context} + if query_id not in relevant_docs[eval_split]: + relevant_docs[eval_split][query_id] = {} + relevant_docs[eval_split][query_id][doc_id] = 1 + + self.corpus = datasets.DatasetDict(corpus) + self.queries = datasets.DatasetDict(queries) + self.relevant_docs = datasets.DatasetDict(relevant_docs) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ell/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/ell/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/AILACasedocsRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/AILACasedocsRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..1cacab14febbfe83e197c3a00fdeeef68e2ff9c0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/AILACasedocsRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class AILACasedocs(AbsTaskRetrieval): + metadata = TaskMetadata( + name="AILACasedocs", + description="The task is to retrieve the case document that most closely matches or is most relevant to the scenario described in the provided query.", + reference="https://zenodo.org/records/4063986", + dataset={ + "path": "mteb/AILA_casedocs", + "revision": "4106e6bcc72e0698d714ea8b101355e3e238431a", + }, + type="Retrieval", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=["written"], + domains=["Legal"], + task_subtypes=["Article retrieval"], + license="CC BY 4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=None, + text_creation="found", + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/AILAStatutesRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/AILAStatutesRetrieval.py new file mode 100644 index 
0000000000000000000000000000000000000000..afdd3ea0415670411f3730c5e4f8afa0084d5440 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/AILAStatutesRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class AILAStatutes(AbsTaskRetrieval): + metadata = TaskMetadata( + name="AILAStatutes", + description="This dataset is structured for the task of identifying the most relevant statutes for a given situation.", + reference="https://zenodo.org/records/4063986", + dataset={ + "path": "mteb/AILA_statutes", + "revision": "ebfcd844eadd3d667efa3c57fc5c8c87f5c2867e", + }, + type="Retrieval", + category="p2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=["written"], + domains=["Legal"], + task_subtypes=["Article retrieval"], + license="CC BY 4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=None, + text_creation="found", + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/ArguAnaRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/ArguAnaRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..cc102e20064c64b78366a7f11b8884dec83f2917 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/ArguAnaRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class ArguAna(AbsTaskRetrieval): + metadata = TaskMetadata( + name="ArguAna", + description="ArguAna: retrieval of the best counterargument to a given argument", + reference="http://argumentation.bplaced.net/arguana/data", + dataset={ + "path": "mteb/arguana", + "revision": "c22ab2a51041ffd869aaddef7af8d8215647e41a", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackAndroidRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackAndroidRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..e569d698597e5886cb721989b992db741a7d073e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackAndroidRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackAndroidRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackAndroidRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-android", + "revision": "f46a197baaae43b4f621051089b82a364682dfeb", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, 
task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackEnglishRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackEnglishRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..7adac86d4e0cf4c85609debf8a50432ad848d4cf --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackEnglishRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackEnglishRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackEnglishRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-english", + "revision": "ad9991cb51e31e31e430383c75ffb2885547b5f0", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackGamingRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackGamingRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..dd2738683c1e977bbe5b015173e0e10b5df23ac3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackGamingRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackGamingRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackGamingRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-gaming", + "revision": "4885aa143210c98657558c04aaf3dc47cfb54340", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackGisRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackGisRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..3517d5bd9c6279f629d001836028b6d88043f076 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackGisRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackGisRetrieval(AbsTaskRetrieval): + metadata = 
TaskMetadata( + name="CQADupstackGisRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-gis", + "revision": "5003b3064772da1887988e05400cf3806fe491f2", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackMathematicaRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackMathematicaRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..a111c30a9581a27c000cbe1d430382e3eea14b99 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackMathematicaRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackMathematicaRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackMathematicaRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-mathematica", + "revision": "90fceea13679c63fe563ded68f3b6f06e50061de", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackPhysicsRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackPhysicsRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..8a5bcc4fc10d36b1cd58a9535687c852caa648c5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackPhysicsRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackPhysicsRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackPhysicsRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-physics", + "revision": "79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackProgrammersRetrieval.py 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackProgrammersRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..544d6111643f092f14730bea2499da5850817567 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackProgrammersRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackProgrammersRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackProgrammersRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-programmers", + "revision": "6184bc1440d2dbc7612be22b50686b8826d22b32", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackStatsRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackStatsRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..9b1c6d2ce2da208fdf4009e320e7b24591f20f08 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackStatsRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackStatsRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackStatsRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-stats", + "revision": "65ac3a16b8e91f9cee4c9828cc7c335575432a2a", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackTexRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackTexRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..80a77dc8653fcc1d94da5fcef057ad906b8558be --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackTexRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackTexRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackTexRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-tex", + "revision": 
"46989137a86843e03a6195de44b09deda022eec7", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackUnixRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackUnixRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..4dbfe392b73cd039fb08f264aaa8bb2a73ca6b0f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackUnixRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackUnixRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackUnixRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-unix", + "revision": "6c6430d3a6d36f8d2a829195bc5dc94d7e063e53", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackWebmastersRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackWebmastersRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..151a2ecdf540b11da97afb10ed654ae38742a78e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackWebmastersRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackWebmastersRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackWebmastersRetrieval", + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + dataset={ + "path": "mteb/cqadupstack-webmasters", + "revision": "160c094312a0e1facb97e55eeddb698c0abe3571", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackWordpressRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackWordpressRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..f6a1f5a77b39228f9e57d074e86d599f3ebfba53 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/CQADupstackWordpressRetrieval.py @@ -0,0 
+1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class CQADupstackWordpressRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="CQADupstackWordpressRetrieval", + dataset={ + "path": "mteb/cqadupstack-wordpress", + "revision": "4ffe81d471b1924886b33c7567bfb200e9eec5c4", + }, + description="CQADupStack: A Benchmark Data Set for Community Question-Answering Research", + reference="http://nlp.cis.unimelb.edu.au/resources/cqadupstack/", + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/ClimateFEVERRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/ClimateFEVERRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..be1e8799ae7b448f2fbf5d706e442b9bb58bc821 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/ClimateFEVERRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class ClimateFEVER(AbsTaskRetrieval): + metadata = TaskMetadata( + name="ClimateFEVER", + description="CLIMATE-FEVER is a dataset adopting the FEVER methodology that consists of 1,535 real-world claims regarding climate change.", + reference="https://www.sustainablefinance.uzh.ch/en/research/climate-fever.html", + dataset={ + "path": "mteb/climate-fever", + "revision": "47f2ac6acb640fc46020b02a5b59fdda04d39380", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/DBPediaRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/DBPediaRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..9cbbcee077c3dd36299a88db55fec8088476e224 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/DBPediaRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class DBPedia(AbsTaskRetrieval): + metadata = TaskMetadata( + name="DBPedia", + description="DBpedia-Entity is a standard test collection for entity search over the DBpedia knowledge base", + reference="https://github.com/iai-group/DBpedia-Entity/", + dataset={ + "path": "mteb/dbpedia", + "revision": "c0f706b76e590d620bd6618b3ca8efdd34e2d659", + }, + type="Retrieval", + category="s2p", + eval_splits=["dev", "test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, +
avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/FEVERRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/FEVERRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..c474fbaa132230e6e781e68840168e3af45dfab3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/FEVERRetrieval.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class FEVER(AbsTaskRetrieval): + metadata = TaskMetadata( + name="FEVER", + dataset={ + "path": "mteb/fever", + "revision": "bea83ef9e8fb933d90a2f1d5515737465d613e12", + }, + description=( + "FEVER (Fact Extraction and VERification) consists of 185,445 claims generated by altering sentences" + " extracted from Wikipedia and subsequently verified without knowledge of the sentence they were" + " derived from." + ), + reference="https://fever.ai/", + type="Retrieval", + category="s2p", + eval_splits=["train", "dev", "test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/FiQA2018Retrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/FiQA2018Retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..149f4c78aba76d21242c56c028355f0a95ec4ebc --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/FiQA2018Retrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class FiQA2018(AbsTaskRetrieval): + metadata = TaskMetadata( + name="FiQA2018", + description="Financial Opinion Mining and Question Answering", + reference="https://sites.google.com/view/fiqa/", + dataset={ + "path": "mteb/fiqa", + "revision": "27a168819829fe9bcd655c2df245fb19452e8e06", + }, + type="Retrieval", + category="s2p", + eval_splits=["train", "dev", "test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/HagridRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/HagridRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..4a894a025cdee035de2e9e836efaaa714fd2a58b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/HagridRetrieval.py @@ -0,0 +1,113 @@ +from __future__ import annotations + +import uuid +from typing import Dict, List + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class HagridRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="HagridRetrieval", + dataset={ + "path": "miracl/hagrid", + "revision": "b2a085913606be3c4f2f1a8bff1810e38bade8fa", + }, + reference="https://github.com/project-miracl/hagrid", + description=( + 
"HAGRID (Human-in-the-loop Attributable Generative Retrieval for Information-seeking Dataset)" + "is a dataset for generative information-seeking scenarios. It consists of queries" + "along with a set of manually labelled relevant passages" + ), + type="Retrieval", + category="s2p", + eval_splits=["dev"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + """Loads the different split of the dataset (queries/corpus/relevants)""" + if self.data_loaded: + return + + data = datasets.load_dataset( + "miracl/hagrid", + split=self.metadata.eval_splits[0], + revision=self.metadata_dict["dataset"].get("revision", None), + ) + proc_data = self.preprocess_data(data) + + self.queries = { + self.metadata.eval_splits[0]: { + d["query_id"]: d["query_text"] for d in proc_data + } + } + self.corpus = { + self.metadata.eval_splits[0]: { + d["answer_id"]: {"text": d["answer_text"]} for d in proc_data + } + } + self.relevant_docs = { + self.metadata.eval_splits[0]: { + d["query_id"]: {d["answer_id"]: 1} for d in proc_data + } + } + + self.data_loaded = True + + def preprocess_data(self, dataset: Dict) -> List[Dict]: + """Preprocessed the data in a format easirer + to handle for the loading of queries and corpus + ------ + PARAMS + dataset : the hagrid dataset (json) + """ + preprocessed_data = [] + for d in dataset: + # get the best answer among positively rated answers + best_answer = self.get_best_answer(d) + # if no good answer found, skip + if best_answer is not None: + preprocessed_data.append( + { + "query_id": str(d["query_id"]), + "query_text": d["query"], + "answer_id": str(uuid.uuid4()), + "answer_text": best_answer, + } + ) + + return preprocessed_data + + def get_best_answer(self, data: Dict) -> str: + """Get the best answer among available answers + of a query. + WARNING : May return None if no good answer available + -------- + PARAMS: + data: a dict representing one element of the dataset + """ + good_answers = [ + a["answer"] + for a in data["answers"] + if a["informative"] == 1 and a["attributable"] == 1 + ] + # Return 1st one if >=1 good answers else None + return good_answers[0] if len(good_answers) > 0 else None diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/HotpotQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/HotpotQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..86bdfd59a1f306887396d0b7fb9be71b349f3c03 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/HotpotQARetrieval.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class HotpotQA(AbsTaskRetrieval): + metadata = TaskMetadata( + name="HotpotQA", + dataset={ + "path": "mteb/hotpotqa", + "revision": "ab518f4d6fcca38d87c25209f94beba119d02014", + }, + description=( + "HotpotQA is a question answering dataset featuring natural, multi-hop questions, with strong" + " supervision for supporting facts to enable more explainable question answering systems." 
+ ), + reference="https://hotpotqa.github.io/", + type="Retrieval", + category="s2p", + eval_splits=["train", "dev", "test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBNarrativeQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBNarrativeQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..50110cd22bf012ccdf19c75e18d1240d8e6abbc2 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBNarrativeQARetrieval.py @@ -0,0 +1,86 @@ +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LEMBNarrativeQARetrieval(AbsTaskRetrieval): + _EVAL_SPLIT = "test" + + metadata = TaskMetadata( + name="LEMBNarrativeQARetrieval", + dataset={ + "path": "dwzhu/LongEmbed", + "revision": "6e346642246bfb4928c560ee08640dc84d074e8c", + "name": "narrativeqa", + }, + reference="https://huggingface.co/datasets/dwzhu/LongEmbed", + description=("narrativeqa subset of dwzhu/LongEmbed dataset."), + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=("1000-01-01", "2017-12-31"), + form=["written"], + domains=["Fiction", "Non-fiction"], + task_subtypes=["Article retrieval"], + license="Not specified", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @article{kocisky-etal-2018-narrativeqa, + title = "The {N}arrative{QA} Reading Comprehension Challenge", + author = "Ko{\v{c}}isk{\'y}, Tom{\'a}{\v{s}} and + Schwarz, Jonathan and + Blunsom, Phil and + Dyer, Chris and + Hermann, Karl Moritz and + Melis, G{\'a}bor and + Grefenstette, Edward", + editor = "Lee, Lillian and + Johnson, Mark and + Toutanova, Kristina and + Roark, Brian", + journal = "Transactions of the Association for Computational Linguistics", + volume = "6", + year = "2018", + address = "Cambridge, MA", + publisher = "MIT Press", + url = "https://aclanthology.org/Q18-1023", + doi = "10.1162/tacl_a_00023", + pages = "317--328", + abstract = "", + } + """, + n_samples={_EVAL_SPLIT: 10804}, + avg_character_length={_EVAL_SPLIT: 326399.3}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + query_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "queries" + ] # dict_keys(['qid', 'text']) + queries = {row["qid"]: row["text"] for row in query_list} + + corpus_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "corpus" + ] # dict_keys(['doc_id', 'text']) + corpus = {row["doc_id"]: {"text": row["text"]} for row in corpus_list} + + qrels_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "qrels" + ] # dict_keys(['qid', 'doc_id']) + qrels = {row["qid"]: {row["doc_id"]: 1} for row in qrels_list} + + self.corpus = {self._EVAL_SPLIT: corpus} + self.queries = {self._EVAL_SPLIT: queries} + self.relevant_docs = {self._EVAL_SPLIT: qrels} + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBNeedleRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBNeedleRetrieval.py new file mode 
100644 index 0000000000000000000000000000000000000000..b5e3a19542d4d657150977ee62072a4bac7b0299 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBNeedleRetrieval.py @@ -0,0 +1,111 @@ +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LEMBNeedleRetrieval(AbsTaskRetrieval): + _EVAL_SPLIT = [ + "test_256", + "test_512", + "test_1024", + "test_2048", + "test_4096", + "test_8192", + "test_16384", + "test_32768", + ] + + metadata = TaskMetadata( + name="LEMBNeedleRetrieval", + dataset={ + "path": "dwzhu/LongEmbed", + "revision": "6e346642246bfb4928c560ee08640dc84d074e8c", + "name": "needle", + }, + reference="https://huggingface.co/datasets/dwzhu/LongEmbed", + description=("needle subset of dwzhu/LongEmbed dataset."), + type="Retrieval", + category="s2p", + eval_splits=_EVAL_SPLIT, + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=("2000-01-01", "2023-12-31"), + form=["written"], + domains=["Academic", "Blog"], + task_subtypes=["Article retrieval"], + license="Not specified", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @article{zhu2024longembed, + title={LongEmbed: Extending Embedding Models for Long Context Retrieval}, + author={Zhu, Dawei and Wang, Liang and Yang, Nan and Song, Yifan and Wu, Wenhao and Wei, Furu and Li, Sujian}, + journal={arXiv preprint arXiv:2404.12096}, + year={2024} + } + """, + n_samples={ + "test_256": 150, + "test_512": 150, + "test_1024": 150, + "test_2048": 150, + "test_4096": 150, + "test_8192": 150, + "test_16384": 150, + "test_32768": 150, + }, + avg_character_length={ + "test_256": 1074.4, + "test_512": 2067.0, + "test_1024": 4129.5, + "test_2048": 8513.4, + "test_4096": 17452.7, + "test_8192": 35261.6, + "test_16384": 72113.7, + "test_32768": 141829.0, + }, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus = {} + self.queries = {} + self.relevant_docs = {} + + for split in self._EVAL_SPLIT: + context_length = int(split.split("_")[1]) + query_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "queries" + ] # dict_keys(['qid', 'text']) + query_list = query_list.filter( + lambda x: x["context_length"] == context_length + ) + queries = {row["qid"]: row["text"] for row in query_list} + + corpus_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "corpus" + ] # dict_keys(['doc_id', 'text']) + corpus_list = corpus_list.filter( + lambda x: x["context_length"] == context_length + ) + corpus = {row["doc_id"]: {"text": row["text"]} for row in corpus_list} + + qrels_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "qrels" + ] # dict_keys(['qid', 'doc_id']) + qrels_list = qrels_list.filter( + lambda x: x["context_length"] == context_length + ) + qrels = {row["qid"]: {row["doc_id"]: 1} for row in qrels_list} + + self.corpus[split] = corpus + self.queries[split] = queries + self.relevant_docs[split] = qrels + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBPasskeyRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBPasskeyRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..45f874df44d9ae54d0303088674cf90ee816ebc5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBPasskeyRetrieval.py @@ -0,0 +1,111 @@ +import datasets + +from 
mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LEMBPasskeyRetrieval(AbsTaskRetrieval): + _EVAL_SPLIT = [ + "test_256", + "test_512", + "test_1024", + "test_2048", + "test_4096", + "test_8192", + "test_16384", + "test_32768", + ] + + metadata = TaskMetadata( + name="LEMBPasskeyRetrieval", + dataset={ + "path": "dwzhu/LongEmbed", + "revision": "6e346642246bfb4928c560ee08640dc84d074e8c", + "name": "passkey", + }, + reference="https://huggingface.co/datasets/dwzhu/LongEmbed", + description=("passkey subset of dwzhu/LongEmbed dataset."), + type="Retrieval", + category="s2p", + eval_splits=_EVAL_SPLIT, + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=("2000-01-01", "2023-12-31"), + form=["written"], + domains=["Fiction"], + task_subtypes=["Article retrieval"], + license="Not specified", + socioeconomic_status="low", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @article{zhu2024longembed, + title={LongEmbed: Extending Embedding Models for Long Context Retrieval}, + author={Zhu, Dawei and Wang, Liang and Yang, Nan and Song, Yifan and Wu, Wenhao and Wei, Furu and Li, Sujian}, + journal={arXiv preprint arXiv:2404.12096}, + year={2024} + } + """, + n_samples={ + "test_256": 150, + "test_512": 150, + "test_1024": 150, + "test_2048": 150, + "test_4096": 150, + "test_8192": 150, + "test_16384": 150, + "test_32768": 150, + }, + avg_character_length={ + "test_256": 914.9, + "test_512": 1823.0, + "test_1024": 3644.7, + "test_2048": 7280.0, + "test_4096": 14555.5, + "test_8192": 29108.1, + "test_16384": 58213.9, + "test_32768": 116417.9, + }, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus = {} + self.queries = {} + self.relevant_docs = {} + + for split in self._EVAL_SPLIT: + context_length = int(split.split("_")[1]) + query_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "queries" + ] # dict_keys(['qid', 'text']) + query_list = query_list.filter( + lambda x: x["context_length"] == context_length + ) + queries = {row["qid"]: row["text"] for row in query_list} + + corpus_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "corpus" + ] # dict_keys(['doc_id', 'text']) + corpus_list = corpus_list.filter( + lambda x: x["context_length"] == context_length + ) + corpus = {row["doc_id"]: {"text": row["text"]} for row in corpus_list} + + qrels_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "qrels" + ] # dict_keys(['qid', 'doc_id']) + qrels_list = qrels_list.filter( + lambda x: x["context_length"] == context_length + ) + qrels = {row["qid"]: {row["doc_id"]: 1} for row in qrels_list} + + self.corpus[split] = corpus + self.queries[split] = queries + self.relevant_docs[split] = qrels + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBQMSumRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBQMSumRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..ac4e492ceb245696cd5694f440c12d9f44459933 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBQMSumRetrieval.py @@ -0,0 +1,95 @@ +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LEMBQMSumRetrieval(AbsTaskRetrieval): + _EVAL_SPLIT = "test" + + metadata = TaskMetadata( + name="LEMBQMSumRetrieval", + dataset={ + "path": 
"dwzhu/LongEmbed", + "revision": "6e346642246bfb4928c560ee08640dc84d074e8c", + "name": "qmsum", + }, + reference="https://huggingface.co/datasets/dwzhu/LongEmbed", + description=("qmsum subset of dwzhu/LongEmbed dataset."), + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=("1950-01-01", "2021-12-31"), + form=["written"], + domains=["Spoken"], + task_subtypes=["Article retrieval"], + license="Not specified", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{zhong-etal-2021-qmsum, + title = "{QMS}um: A New Benchmark for Query-based Multi-domain Meeting Summarization", + author = "Zhong, Ming and + Yin, Da and + Yu, Tao and + Zaidi, Ahmad and + Mutuma, Mutethia and + Jha, Rahul and + Awadallah, Ahmed Hassan and + Celikyilmaz, Asli and + Liu, Yang and + Qiu, Xipeng and + Radev, Dragomir", + editor = "Toutanova, Kristina and + Rumshisky, Anna and + Zettlemoyer, Luke and + Hakkani-Tur, Dilek and + Beltagy, Iz and + Bethard, Steven and + Cotterell, Ryan and + Chakraborty, Tanmoy and + Zhou, Yichao", + booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", + month = jun, + year = "2021", + address = "Online", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2021.naacl-main.472", + doi = "10.18653/v1/2021.naacl-main.472", + pages = "5905--5921", + abstract = "", + } + """, + n_samples={_EVAL_SPLIT: 1724}, + avg_character_length={_EVAL_SPLIT: 56136.4}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + query_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "queries" + ] # dict_keys(['qid', 'text']) + queries = {row["qid"]: row["text"] for row in query_list} + + corpus_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "corpus" + ] # dict_keys(['doc_id', 'text']) + corpus = {row["doc_id"]: {"text": row["text"]} for row in corpus_list} + + qrels_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "qrels" + ] # dict_keys(['qid', 'doc_id']) + qrels = {row["qid"]: {row["doc_id"]: 1} for row in qrels_list} + + self.corpus = {self._EVAL_SPLIT: corpus} + self.queries = {self._EVAL_SPLIT: queries} + self.relevant_docs = {self._EVAL_SPLIT: qrels} + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBSummScreenFDRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBSummScreenFDRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..9f7d254ad2f573cdbbeabfc51a0bb38af9b8d7fa --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBSummScreenFDRetrieval.py @@ -0,0 +1,82 @@ +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LEMBSummScreenFDRetrieval(AbsTaskRetrieval): + _EVAL_SPLIT = "validation" + + metadata = TaskMetadata( + name="LEMBSummScreenFDRetrieval", + dataset={ + "path": "dwzhu/LongEmbed", + "revision": "6e346642246bfb4928c560ee08640dc84d074e8c", + "name": "summ_screen_fd", + }, + reference="https://huggingface.co/datasets/dwzhu/LongEmbed", + description=("summ_screen_fd subset of dwzhu/LongEmbed dataset."), + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["eng-Latn"], + 
main_score="ndcg_at_10", + date=("2000-01-01", "2021-12-31"), + form=["written"], + domains=["Spoken"], + task_subtypes=["Article retrieval"], + license="Not specified", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{chen-etal-2022-summscreen, + title = "{S}umm{S}creen: A Dataset for Abstractive Screenplay Summarization", + author = "Chen, Mingda and + Chu, Zewei and + Wiseman, Sam and + Gimpel, Kevin", + editor = "Muresan, Smaranda and + Nakov, Preslav and + Villavicencio, Aline", + booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", + month = may, + year = "2022", + address = "Dublin, Ireland", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2022.acl-long.589", + doi = "10.18653/v1/2022.acl-long.589", + pages = "8602--8615", + abstract = "", + } + """, + n_samples={_EVAL_SPLIT: 672}, + avg_character_length={_EVAL_SPLIT: 31445.8}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + query_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "queries" + ] # dict_keys(['qid', 'text']) + queries = {row["qid"]: row["text"] for row in query_list} + + corpus_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "corpus" + ] # dict_keys(['doc_id', 'text']) + corpus = {row["doc_id"]: {"text": row["text"]} for row in corpus_list} + + qrels_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "qrels" + ] # dict_keys(['qid', 'doc_id']) + qrels = {row["qid"]: {row["doc_id"]: 1} for row in qrels_list} + + self.corpus = {self._EVAL_SPLIT: corpus} + self.queries = {self._EVAL_SPLIT: queries} + self.relevant_docs = {self._EVAL_SPLIT: qrels} + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBWikimQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBWikimQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..e2269baad5a12e28972b255c13786e7b37cfd6d6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LEMBWikimQARetrieval.py @@ -0,0 +1,70 @@ +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LEMBWikimQARetrieval(AbsTaskRetrieval): + _EVAL_SPLIT = "test" + + metadata = TaskMetadata( + name="LEMBWikimQARetrieval", + dataset={ + "path": "dwzhu/LongEmbed", + "revision": "6e346642246bfb4928c560ee08640dc84d074e8c", + "name": "2wikimqa", + }, + reference="https://huggingface.co/datasets/dwzhu/LongEmbed", + description=("2wikimqa subset of dwzhu/LongEmbed dataset."), + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=("1950-01-01", "2019-12-31"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Article retrieval"], + license="Not specified", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{ho2020constructing, + title={Constructing A Multi-hop QA Dataset for Comprehensive Evaluation of Reasoning Steps}, + author={Ho, Xanh and Nguyen, Anh-Khoa Duong and Sugawara, Saku and Aizawa, Akiko}, + booktitle={Proceedings of the 28th International Conference on Computational Linguistics}, + pages={6609--6625}, + year={2020} + } + """, + 
n_samples={_EVAL_SPLIT: 500}, + avg_character_length={_EVAL_SPLIT: 37513}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + query_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "queries" + ] # dict_keys(['qid', 'text']) + queries = {row["qid"]: row["text"] for row in query_list} + + corpus_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "corpus" + ] # dict_keys(['doc_id', 'text']) + corpus = {row["doc_id"]: {"text": row["text"]} for row in corpus_list} + + qrels_list = datasets.load_dataset(**self.metadata_dict["dataset"])[ + "qrels" + ] # dict_keys(['qid', 'doc_id']) + qrels = {row["qid"]: {row["doc_id"]: 1} for row in qrels_list} + + self.corpus = {self._EVAL_SPLIT: corpus} + self.queries = {self._EVAL_SPLIT: queries} + self.relevant_docs = {self._EVAL_SPLIT: qrels} + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalBenchConsumerContractsQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalBenchConsumerContractsQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..ca738d0ccd2a56a6e6bd79f6ac561a3c5f1eb34e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalBenchConsumerContractsQARetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LegalBenchConsumerContractsQA(AbsTaskRetrieval): + metadata = TaskMetadata( + name="LegalBenchConsumerContractsQA", + description="The dataset includes questions and answers related to contracts.", + reference="https://huggingface.co/datasets/nguha/legalbench/viewer/consumer_contracts_qa", + dataset={ + "path": "mteb/legalbench_consumer_contracts_qa", + "revision": "b23590301ec94e8087e2850b21d43d4956b1cca9", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=["written"], + domains=["Legal"], + task_subtypes=["Question answering"], + license="CC BY-NC 4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=None, + text_creation="found", + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalBenchCorporateLobbyingRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalBenchCorporateLobbyingRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..a8798970d8eec750f13cca39d9075996d7a6c1f6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalBenchCorporateLobbyingRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LegalBenchCorporateLobbying(AbsTaskRetrieval): + metadata = TaskMetadata( + name="LegalBenchCorporateLobbying", + description="The dataset includes bill titles and bill summaries related to corporate lobbying.", + reference="https://huggingface.co/datasets/nguha/legalbench/viewer/corporate_lobbying", + dataset={ + "path": "mteb/legalbench_corporate_lobbying", + "revision": "f69691c650464e62546d7f2a4536f8f87c891e38", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=["written"], + domains=["Legal"], 
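The LEMB loaders above call datasets.load_dataset once per table (queries, corpus, qrels), and the needle/passkey variants repeat those calls for every context-length split. A hedged sketch of an equivalent loader that fetches the dataset once and filters in memory; the dataset keys are copied from the task metadata above, and the two-entry split list is an abbreviation of the eight splits the tasks use:

```python
import datasets

# Keys assumed to match the LEMB task metadata above.
dataset_kwargs = {
    "path": "dwzhu/LongEmbed",
    "name": "needle",
    "revision": "6e346642246bfb4928c560ee08640dc84d074e8c",
}

# One download; the returned dict is keyed by the three tables used above.
tables = datasets.load_dataset(**dataset_kwargs)

corpus, queries, relevant_docs = {}, {}, {}
for split in ["test_256", "test_512"]:  # abbreviated; the tasks iterate eight splits
    n = int(split.split("_")[1])
    queries[split] = {
        r["qid"]: r["text"] for r in tables["queries"] if r["context_length"] == n
    }
    corpus[split] = {
        r["doc_id"]: {"text": r["text"]}
        for r in tables["corpus"]
        if r["context_length"] == n
    }
    relevant_docs[split] = {
        r["qid"]: {r["doc_id"]: 1}
        for r in tables["qrels"]
        if r["context_length"] == n
    }
```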
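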
+ task_subtypes=["Article retrieval"], + license="CC BY 4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=None, + text_creation="found", + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalSummarizationRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalSummarizationRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..0beec16661db58734351ec9fb8743691088691ce --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/LegalSummarizationRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class LegalSummarization(AbsTaskRetrieval): + metadata = TaskMetadata( + name="LegalSummarization", + description="The dataset consistes of 439 pairs of contracts and their summarizations from https://tldrlegal.com and https://tosdr.org/.", + reference="https://github.com/lauramanor/legal_summarization", + dataset={ + "path": "mteb/legal_summarization", + "revision": "3bb1a05c66872889662af04c5691c14489cebd72", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=["written"], + domains=["Legal"], + task_subtypes=["Article retrieval"], + license="Apache License 2.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=None, + text_creation="found", + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MLQuestions.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MLQuestions.py new file mode 100644 index 0000000000000000000000000000000000000000..00412ef71ba67624c46dab1898a8292c8ebe5fb0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MLQuestions.py @@ -0,0 +1,103 @@ +import csv + +from huggingface_hub import snapshot_download + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class MLQuestionsRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="MLQuestions", + dataset={ + "path": "McGill-NLP/mlquestions", + "revision": "83b690cb666c5a8869e7f213a877bbd24a642d7c", + }, + reference="https://github.com/McGill-NLP/MLQuestions", + description=( + "MLQuestions is a domain adaptation dataset for the machine learning domain" + "It consists of ML questions along with passages from Wikipedia machine learning pages (https://en.wikipedia.org/wiki/Category:Machine_learning)" + ), + type="Retrieval", + category="s2p", + eval_splits=["dev", "test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=( + "2021-01-01", + "2021-03-31", + ), # The period here is for both wiki articles and queries + form=["written"], + domains=["Encyclopaedic", "Academic"], + task_subtypes=["Question answering"], + license="cc-by-nc-sa-4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{kulshreshtha-etal-2021-back, + title = "Back-Training excels Self-Training at Unsupervised Domain Adaptation of Question Generation and Passage Retrieval", + author = "Kulshreshtha, Devang and + Belfer, Robert and + Serban, Iulian Vlad and + Reddy, Siva", + booktitle = "Proceedings of 
the 2021 Conference on Empirical Methods in Natural Language Processing", + month = nov, + year = "2021", + address = "Online and Punta Cana, Dominican Republic", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2021.emnlp-main.566", + pages = "7064--7078", + abstract = "In this work, we introduce back-training, an alternative to self-training for unsupervised domain adaptation (UDA). While self-training generates synthetic training data where natural inputs are aligned with noisy outputs, back-training results in natural outputs aligned with noisy inputs. This significantly reduces the gap between target domain and synthetic data distribution, and reduces model overfitting to source domain. We run UDA experiments on question generation and passage retrieval from the Natural Questions domain to machine learning and biomedical domains. We find that back-training vastly outperforms self-training by a mean improvement of 7.8 BLEU-4 points on generation, and 17.6{\%} top-20 retrieval accuracy across both domains. We further propose consistency filters to remove low-quality synthetic data before training. We also release a new domain-adaptation dataset - MLQuestions containing 35K unaligned questions, 50K unaligned passages, and 3K aligned question-passage pairs.", + } + """, + n_samples={"dev": 1500, "test": 1500}, + avg_character_length={"dev": 305, "test": 307}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + self.corpus, self.queries, self.relevant_docs = {}, {}, {} + dataset_path = self.metadata_dict["dataset"]["path"] + revision = self.metadata_dict["dataset"].get("revision", None) + download_dir = snapshot_download( + repo_id=dataset_path, repo_type="dataset", revision=revision + ) + for split in kwargs.get("eval_splits", self.metadata_dict["eval_splits"]): + corpus, queries, qrels = self._load_data_for_split(download_dir, split) + self.corpus[split], self.queries[split], self.relevant_docs[split] = ( + corpus, + queries, + qrels, + ) + + self.data_loaded = True + + def _load_data_for_split(self, download_dir, split): + queries, corpus, qrels = {}, {}, {} + + dataset_path = f"{download_dir}/{split}.csv" + with open(dataset_path, "r") as csvfile: + reader = csv.DictReader(csvfile) + for i, row in enumerate(reader): + query_id = str(i) + doc_id = row["indexes"] + query = row["target_text"] + queries[query_id] = query + qrels[query_id] = {doc_id: 1} + + # Same corpus for all splits + corpus_path = f"{download_dir}/test_passages.csv" + with open(corpus_path, "r") as csvfile: + reader = csv.DictReader(csvfile) + for i, row in enumerate(reader): + doc_id = str(i) + corpus[doc_id] = { + "title": "", + "text": row["input_text"], + } + + return corpus, queries, qrels diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MSMARCORetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MSMARCORetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..7c8cb9556d817c1d93a5767391710505ab63421d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MSMARCORetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class MSMARCO(AbsTaskRetrieval): + metadata = TaskMetadata( + name="MSMARCO", + dataset={ + "path": "mteb/msmarco", + "revision": "c5a29a104738b98a9e76336939199e264163d4a0", + }, + description="MS MARCO is a 
collection of datasets focused on deep learning in search", + reference="https://microsoft.github.io/msmarco/", + type="Retrieval", + category="s2p", + eval_splits=["train", "dev", "test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MSMARCOv2Retrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MSMARCOv2Retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..e3761b9f0ad9e4f6190ec82cba8494c5508c18a6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MSMARCOv2Retrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class MSMARCOv2(AbsTaskRetrieval): + metadata = TaskMetadata( + name="MSMARCOv2", + dataset={ + "path": "mteb/msmarco-v2", + "revision": "b1663124850d305ab7c470bb0548acf8e2e7ea43", + }, + description="MS MARCO is a collection of datasets focused on deep learning in search", + reference="https://microsoft.github.io/msmarco/TREC-Deep-Learning.html", + type="Retrieval", + category="s2p", + eval_splits=["train", "dev", "dev2"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MedicalQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MedicalQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..8d81b4c497ef6da22b46d076921259282f9f8f08 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/MedicalQARetrieval.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class MedicalQARetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="MedicalQARetrieval", + description="The dataset consists of 2048 medical question and answer pairs.", + reference="https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-019-3119-4", + dataset={ + "path": "mteb/medical_qa", + "revision": "ae763399273d8b20506b80cf6f6f9a31a6a2b238", + }, + type="Retrieval", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=("2017-01-01", "2019-12-31"), # best guess + form=["written"], + domains=["Medical"], + task_subtypes=["Article retrieval"], + license="CC0 1.0 Universal", + socioeconomic_status="medium", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@ARTICLE{BenAbacha-BMC-2019, + author = {Asma, Ben Abacha and Dina, Demner{-}Fushman}, + title = {A Question-Entailment Approach to Question Answering}, + journal = {{BMC} Bioinform.}, + volume = {20}, + number = {1}, + pages = {511:1--511:23}, + year = {2019}, + url = {https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-019-3119-4} + } """, + n_samples={"test": 2048}, +
avg_character_length={"test": 1205.9619140625}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NFCorpusRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NFCorpusRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..5b647ad08e31a97a9323ca555d1c71206f855db3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NFCorpusRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class NFCorpus(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NFCorpus", + dataset={ + "path": "mteb/nfcorpus", + "revision": "ec0fa4fe99da2ff19ca1214b7966684033a58814", + }, + description="NFCorpus: A Full-Text Learning to Rank Dataset for Medical Information Retrieval", + reference="https://www.cl.uni-heidelberg.de/statnlpgroup/nfcorpus/", + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NQRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NQRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..4b7de7d175f208a120d1267c99029d317ffee36d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NQRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class NQ(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NQ", + dataset={ + "path": "mteb/nq", + "revision": "b774495ed302d8c44a3a7ea25c90dbce03968f31", + }, + description="NFCorpus: A Full-Text Learning to Rank Dataset for Medical Information Retrieval", + reference="https://ai.google.com/research/NaturalQuestions/", + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NarrativeQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NarrativeQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..cd064948682bb4441f4ce6c660f3c30f91f25e5e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/NarrativeQARetrieval.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class NarrativeQARetrieval(AbsTaskRetrieval): + _EVAL_SPLIT = "test" + + metadata = TaskMetadata( + name="NarrativeQARetrieval", + dataset={ + "path": "narrativeqa", + "revision": "2e643e7363944af1c33a652d1c87320d0871c4e4", + }, + reference="https://metatext.io/datasets/narrativeqa", + description=( + "NarrativeQA is a dataset for the task of question answering on long narratives. 
It consists of " + "realistic QA instances collected from literature (fiction and non-fiction) and movie scripts. " + ), + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + data = datasets.load_dataset( + split=self._EVAL_SPLIT, + **self.metadata_dict["dataset"], + ) + self.queries = { + self._EVAL_SPLIT: { + str(i): row["question"]["text"] for i, row in enumerate(data) + } + } + self.corpus = { + self._EVAL_SPLIT: { + str(row["document"]["id"]): {"text": row["document"]["text"]} + for row in data + } + } + self.relevant_docs = { + self._EVAL_SPLIT: { + str(i): {row["document"]["id"]: 1} for i, row in enumerate(data) + } + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/QuoraRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/QuoraRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..fffeb8611fccb1eafe2fe0dec174c1abf32c173c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/QuoraRetrieval.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class QuoraRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="QuoraRetrieval", + dataset={ + "path": "mteb/quora", + "revision": "e4e08e0b7dbe3c8700f0daef558ff32256715259", + }, + description=( + "QuoraRetrieval is based on questions that are marked as duplicates on the Quora platform. Given a" + " question, find other (duplicate) questions." + ), + reference="https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs", + type="Retrieval", + category="s2s", + eval_splits=["dev", "test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/SCIDOCSRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/SCIDOCSRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..1fbcc67ccba0b54103f1c6fdab4b80f9a63a7c1d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/SCIDOCSRetrieval.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class SCIDOCS(AbsTaskRetrieval): + metadata = TaskMetadata( + name="SCIDOCS", + dataset={ + "path": "mteb/scidocs", + "revision": "f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88", + }, + description=( + "SciDocs, a new evaluation benchmark consisting of seven document-level tasks ranging from citation" + " prediction, to document classification and recommendation." 
+ ), + reference="https://allenai.org/data/scidocs", + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/SciFactRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/SciFactRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..a59854ce8b93aee12e467870b484a4f2aca289e8 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/SciFactRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class SciFact(AbsTaskRetrieval): + metadata = TaskMetadata( + name="SciFact", + dataset={ + "path": "mteb/scifact", + "revision": "0228b52cf27578f30900b9e5271d331663a030d7", + }, + description="SciFact verifies scientific claims using evidence from the research literature containing scientific paper abstracts.", + reference="https://github.com/allenai/scifact", + type="Retrieval", + category="s2p", + eval_splits=["train", "test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/TRECCOVIDRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/TRECCOVIDRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d082a6fb73529d78b4cf63ddfd5c9ddabcf57434 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/TRECCOVIDRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class TRECCOVID(AbsTaskRetrieval): + metadata = TaskMetadata( + name="TRECCOVID", + description="TRECCOVID is an ad-hoc search challenge based on the COVID-19 dataset containing scientific articles related to the COVID-19 pandemic.", + reference="https://ir.nist.gov/covidSubmit/index.html", + dataset={ + "path": "mteb/trec-covid", + "revision": "bb9466bac8153a0349341eb1b22e06409e78ef4e", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/TopiOCQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/TopiOCQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..20fdcbf7f610fa0e691c696e72219d8523c17cc3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/TopiOCQARetrieval.py @@ -0,0 +1,102 @@ +from datasets import load_dataset + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from 
....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +CORPUS_HF_NAME, CORPUS_HF_VERSION, CORPUS_HF_SPLIT = ( + "McGill-NLP/TopiOCQA-wiki-corpus", + "50ae3b82713b1a935190def03ce7e7e75a318636", + "train", +) + + +class TopiOCQARetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="TopiOCQA", + dataset={ + "path": "McGill-NLP/TopiOCQA", + "revision": "66cd1dbf5577c653ecb99b385200f08e15e12f30", + }, + reference="https://mcgill-nlp.github.io/topiocqa", + description=( + "TopiOCQA (Open-domain Conversational Question Answering with Topic Switching) " + "is an information-seeking conversational dataset with challenging topic switching phenomena. " + "It consists of conversation histories along with manually labelled relevant/gold passages." + ), + type="Retrieval", + category="s2p", + eval_splits=["validation"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=("2021-03-01", "2021-07-31"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Conversational retrieval"], + license="cc-by-nc-sa-4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" + @article{adlakha2022topiocqa, + title={Topi{OCQA}: Open-domain Conversational Question Answering with Topic Switching}, + author={Adlakha, Vaibhav and Dhuliawala, Shehzaad and Suleman, Kaheer and de Vries, Harm and Reddy, Siva}, + journal={Transactions of the Association for Computational Linguistics}, + volume = {10}, + pages = {468-483}, + year = {2022}, + month = {04}, + issn = {2307-387X}, + doi = {10.1162/tacl_a_00471}, + url = {https://doi.org/10.1162/tacl\_a\_00471}, + eprint = {https://direct.mit.edu/tacl/article-pdf/doi/10.1162/tacl\_a\_00471/2008126/tacl\_a\_00471.pdf}, + } + """, + n_samples={"validation": 2514}, + avg_character_length={"validation": 708}, + ) + + # TODO: Will be removed if curated and added to mteb HF + def load_data(self, **kwargs): + if self.data_loaded: + return + self.corpus, self.queries, self.relevant_docs = {}, {}, {} + dataset_path = self.metadata_dict["dataset"]["path"] + for split in kwargs.get("eval_splits", self.metadata_dict["eval_splits"]): + corpus, queries, qrels = self._load_data_for_split(dataset_path, split) + self.corpus[split], self.queries[split], self.relevant_docs[split] = ( + corpus, + queries, + qrels, + ) + + self.data_loaded = True + + def _load_data_for_split(self, dataset_path, split): + revision = self.metadata_dict["dataset"].get("revision", None) + ds = load_dataset( + dataset_path, + split=split, + revision=revision, + ) + queries, corpus, qrels = {}, {}, {} + for sample in ds: + query_id = f"{sample['Conversation_no']}-{sample['Turn_no']}" + query = sample["Context"] + [sample["Question"]] + doc_id = sample["Gold_passage"]["id"] + queries[query_id] = query + qrels[query_id] = {doc_id: 1} + + corpus_ds = load_dataset( + CORPUS_HF_NAME, revision=CORPUS_HF_VERSION, split=CORPUS_HF_SPLIT + ) + for doc in corpus_ds: + doc_id = doc["id"] + corpus[doc_id] = { + "title": "; ".join([doc["title"], doc["sub_title"]]), + "text": doc["contents"], + } + + return corpus, queries, qrels diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/Touche2020Retrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/Touche2020Retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..f365446b7e230cfb9588a4640ff7de329695287f --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/Touche2020Retrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class Touche2020(AbsTaskRetrieval): + metadata = TaskMetadata( + name="Touche2020", + description="Touché Task 1: Argument Retrieval for Controversial Questions", + reference="https://webis.de/events/touche-20/shared-task-1.html", + dataset={ + "path": "mteb/touche2020", + "revision": "a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/eng/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/est/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/est/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/est/estqa.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/est/estqa.py new file mode 100644 index 0000000000000000000000000000000000000000..b1d94be1f8a190079a7b2b2b8cc1ea83deeacbe9 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/est/estqa.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class EstQA(AbsTaskRetrieval): + metadata = TaskMetadata( + name="EstQA", + dataset={ + "path": "kardosdrur/estonian-qa", + "revision": "99d6f921d9dd4d09116a6312deceb22c16529cfb", + }, + description=( + "EstQA is an Estonian question answering dataset based on Wikipedia." 
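Once these modules are importable, any task in this diff can be selected by its metadata name. A hedged usage sketch; the model choice and output folder are assumptions, not part of the diff:

```python
from mteb import MTEB
from sentence_transformers import SentenceTransformer

# Any encoder exposing an encode() method works; this model is only an example.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

# Select a task defined in this diff by its TaskMetadata name.
evaluation = MTEB(tasks=["EstQA"])
evaluation.run(model, output_folder="results/EstQA")
```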
+ ), + reference="https://www.semanticscholar.org/paper/Extractive-Question-Answering-for-Estonian-Language-182912IAPM-Alum%C3%A4e/ea4f60ab36cadca059c880678bc4c51e293a85d6?utm_source=direct_link", + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["est-Latn"], + main_score="ndcg_at_10", + date=( + "2002-08-24", + "2021-05-10", + ), # birth of Estonian Wikipedia to publishing the article + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Question answering"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation=""" +@mastersthesis{mastersthesis, + author = {Anu Käver}, + title = {Extractive Question Answering for Estonian Language}, + school = {Tallinn University of Technology (TalTech)}, + year = 2021 +} +""", + n_samples={"test": 603}, + avg_character_length={"test": 772.5331950207469}, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/AlloprofRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/AlloprofRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..5fdf1fa16a8aa76b1d438e3357c3be8d30320205 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/AlloprofRetrieval.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class AlloprofRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="AlloprofRetrieval", + description="This dataset was provided by AlloProf, an organisation in Quebec, Canada, offering resources and a help forum curated by a large number of teachers for students, covering all subjects taught in primary and secondary school", + reference="https://huggingface.co/datasets/antoinelb7/alloprof", + dataset={ + "path": "lyon-nlp/alloprof", + "revision": "fcf295ea64c750f41fadbaa37b9b861558e1bfbd", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + # fetch both subsets of the dataset + corpus_raw = datasets.load_dataset( + name="documents", + **self.metadata_dict["dataset"], + ) + queries_raw = datasets.load_dataset( + name="queries", + **self.metadata_dict["dataset"], + ) + eval_split = self.metadata_dict["eval_splits"][0] + self.queries = { + eval_split: {str(q["id"]): q["text"] for q in queries_raw[eval_split]} + } + self.corpus = { + eval_split: { + str(d["uuid"]): {"text": d["text"]} for d in corpus_raw[eval_split] + } + } + + # a query may have several relevant documents; accumulate them instead of overwriting + self.relevant_docs = {eval_split: {}} + for q in queries_raw[eval_split]: + for r in q["relevant"]: + self.relevant_docs[eval_split].setdefault(str(q["id"]), {})[r] = 1 + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/BSARDRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/BSARDRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..a8025514aab08e39406a4ab267c37e6e01856131 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/BSARDRetrieval.py @@ -0,0 +1,73 @@ +from
__future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class BSARDRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="BSARDRetrieval", + description="The Belgian Statutory Article Retrieval Dataset (BSARD) is a French native dataset for studying legal information retrieval. BSARD consists of more than 22,600 statutory articles from Belgian law and about 1,100 legal questions posed by Belgian citizens and labeled by experienced jurists with relevant articles from the corpus.", + reference="https://huggingface.co/datasets/maastrichtlawtech/bsard", + dataset={ + "path": "maastrichtlawtech/bsard", + "revision": "5effa1b9b5fa3b0f9e12523e6e43e5f86a6e6d59", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["fra-Latn"], + main_score="recall_at_100", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + # fetch both subsets of the dataset, only test split + corpus_raw = datasets.load_dataset( + name="corpus", + split="corpus", + **self.metadata_dict["dataset"], + ) + queries_raw = datasets.load_dataset( + name="questions", + split=self.metadata.eval_splits[0], + **self.metadata_dict["dataset"], + ) + + self.queries = { + self.metadata.eval_splits[0]: { + str(q["id"]): (q["question"] + " " + q["extra_description"]).strip() + for q in queries_raw + } + } + + self.corpus = { + self.metadata.eval_splits[0]: { + str(d["id"]): {"text": d["article"]} for d in corpus_raw + } + } + + # a question may cite several statutory articles; accumulate them instead of overwriting + self.relevant_docs = {self.metadata.eval_splits[0]: {}} + for q in queries_raw: + for doc_id in q["article_ids"]: + self.relevant_docs[self.metadata.eval_splits[0]].setdefault( + str(q["id"]), {} + )[str(doc_id)] = 1 + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/FQuADRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/FQuADRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..182c87dbba202527b2f788ac2f586dae8d964cb0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/FQuADRetrieval.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class FQuADRetrieval(AbsTaskRetrieval): + _EVAL_SPLITS = ["test", "validation"] + + metadata = TaskMetadata( + name="FQuADRetrieval", + description="This dataset has been built from FQuAD, the French SQuAD-style question answering dataset.", + reference="https://huggingface.co/datasets/manu/fquad2_test", + dataset={ + "path": "manu/fquad2_test", + "revision": "5384ce827bbc2156d46e6fcba83d75f8e6e1b4a6", + }, + type="Retrieval", + category="s2p", + eval_splits=_EVAL_SPLITS, + eval_langs=["fra-Latn"], + main_score="ndcg_at_10", + date=("2019-11-01", "2020-05-01"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Article retrieval"], + license="apache-2.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="created", + bibtex_citation="""@inproceedings{dhoffschmidt-etal-2020-fquad, + title = "{FQ}u{AD}: {F}rench Question Answering Dataset", + author = "d{'}Hoffschmidt, Martin and +
Belblidia, Wacim and + Heinrich, Quentin and + Brendl{\'e}, Tom and + Vidal, Maxime", + editor = "Cohn, Trevor and + He, Yulan and + Liu, Yang", + booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020", + month = nov, + year = "2020", + address = "Online", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2020.findings-emnlp.107", + doi = "10.18653/v1/2020.findings-emnlp.107", + pages = "1193--1208", +}""", + n_samples={"test": 400, "validation": 100}, + avg_character_length={"test": 937, "validation": 930}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + dataset_raw = datasets.load_dataset( + **self.metadata_dict["dataset"], + ) + + # set valid_hasAns and test_hasAns as the validation and test splits (only queries with answers) + dataset_raw["validation"] = dataset_raw["valid_hasAns"] + del dataset_raw["valid_hasAns"] + + dataset_raw["test"] = dataset_raw["test_hasAns"] + del dataset_raw["test_hasAns"] + + # rename context column to text + dataset_raw = dataset_raw.rename_column("context", "text") + + self.queries = { + eval_split: { + str(i): q["question"] for i, q in enumerate(dataset_raw[eval_split]) + } + for eval_split in self.metadata_dict["eval_splits"] + } + + self.corpus = { + eval_split: {str(row["title"]): row for row in dataset_raw[eval_split]} + for eval_split in self.metadata_dict["eval_splits"] + } + + self.relevant_docs = { + eval_split: { + str(i): {str(q["title"]): 1} + for i, q in enumerate(dataset_raw[eval_split]) + } + for eval_split in self.metadata_dict["eval_splits"] + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/SyntecRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/SyntecRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..9da6c49e806424d558861c1129ef4bec523d5441 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/SyntecRetrieval.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class SyntecRetrieval(AbsTaskRetrieval): + _EVAL_SPLITS = ["test"] + + metadata = TaskMetadata( + name="SyntecRetrieval", + description="This dataset has been built from the Syntec Collective bargaining agreement.", + reference="https://huggingface.co/datasets/lyon-nlp/mteb-fr-retrieval-syntec-s2p", + dataset={ + "path": "lyon-nlp/mteb-fr-retrieval-syntec-s2p", + "revision": "19661ccdca4dfc2d15122d776b61685f48c68ca9", + }, + type="Retrieval", + category="s2p", + eval_splits=_EVAL_SPLITS, + eval_langs=["fra-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=[], + text_creation=None, + bibtex_citation=None, + n_samples={"test": 90}, + avg_character_length={"test": 62}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + # fetch both subsets of the dataset + corpus_raw = datasets.load_dataset( + name="documents", + **self.metadata_dict["dataset"], + ) + queries_raw = datasets.load_dataset( + name="queries", + **self.metadata_dict["dataset"], + ) + + eval_split = self.metadata_dict["eval_splits"][0] + self.queries = { + eval_split: { + str(i): q["Question"] for i, q in enumerate(queries_raw[eval_split]) + } + } + + corpus_raw = corpus_raw[eval_split] + corpus_raw = 
corpus_raw.rename_column("content", "text") + self.corpus = {eval_split: {str(row["id"]): row for row in corpus_raw}} + + self.relevant_docs = { + eval_split: { + str(i): {str(q["Article"]): 1} + for i, q in enumerate(queries_raw[eval_split]) + } + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/fra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/hun/HunSum2.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/hun/HunSum2.py new file mode 100644 index 0000000000000000000000000000000000000000..74c8e83c6ee7538f2f7d9cafa30df7eb99102a2d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/hun/HunSum2.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from datasets import load_dataset + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class HunSum2AbstractiveRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="HunSum2AbstractiveRetrieval", + dataset={ + "path": "SZTAKI-HLT/HunSum-2-abstractive", + "revision": "24e1445c8180d937f0a16f8ae8a62e77cc952e56", + }, + description=( + "HunSum-2-abstractive is a Hungarian dataset containing news articles along with lead, titles and metadata." + ), + reference="https://arxiv.org/abs/2404.03555", + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["hun-Latn"], + main_score="ndcg_at_1", + date=( + "1848-12-15", + "2024-03-19", + ), + form=["written"], + domains=["News"], + task_subtypes=["Article retrieval"], + license="CC-BY 4.0", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" +@misc{barta2024news, + title={From News to Summaries: Building a Hungarian Corpus for Extractive and Abstractive Summarization}, + author={Botond Barta and Dorina Lakatos and Attila Nagy and Milán Konor Nyist and Judit Ács}, + year={2024}, + eprint={2404.03555}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +""", + n_samples={ + "test": 1998, + }, + avg_character_length={ + "test": 2462.2177177177177, + }, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + self.corpus, self.queries, self.relevant_docs = {}, {}, {} + ds = load_dataset(**self.metadata.dataset, split=self.metadata.eval_splits) + ds = dict(zip(self.metadata.eval_splits, ds)) + for split_name, split in ds.items(): + self.corpus[split_name] = {} + self.queries[split_name] = {} + self.relevant_docs[split_name] = {} + for record in split: + self.corpus[split_name]["d" + record["uuid"]] = { + "title": record["title"], + "text": record["article"], + } + self.queries[split_name]["q" + record["uuid"]] = record["lead"] + self.relevant_docs[split_name]["q" + record["uuid"]] = { + "d" + record["uuid"]: 1 + } + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/jpn/JaQuADRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/jpn/JaQuADRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..c157df1270e68e32c154c203608d4003296a01de --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/jpn/JaQuADRetrieval.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +from datasets import load_dataset + +from mteb.abstasks 
import AbsTaskRetrieval, TaskMetadata + + +class JaQuADRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="JaQuADRetrieval", + dataset={ + "path": "SkelterLabsInc/JaQuAD", + "revision": "05600ff310a0970823e70f82f428893b85c71ffe", + }, + description="Human-annotated question-answer pairs for Japanese wikipedia pages.", + reference="https://arxiv.org/abs/2202.01764", + type="Retrieval", + category="p2p", + eval_splits=["validation"], + eval_langs=["jpn-Jpan"], + main_score="ndcg_at_10", + date=("2022-01-01", "2022-12-31"), # approximate guess + form=["written"], + domains=["Encyclopaedic", "Non-fiction"], + task_subtypes=["Question answering"], + license="CC-BY-SA-3.0", + socioeconomic_status="high", + annotations_creators="human-annotated", + dialect=None, + text_creation="found", + bibtex_citation="""@misc{so2022jaquad, + title={{JaQuAD: Japanese Question Answering Dataset for Machine Reading Comprehension}}, + author={ByungHoon So and Kyuhong Byun and Kyungwon Kang and Seongjin Cho}, + year={2022}, + eprint={2202.01764}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +}""", + n_samples={"validation": 2048}, + avg_character_length={"validation": 400.75}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + split = self.metadata_dict["eval_splits"][0] + ds = load_dataset(**self.metadata_dict["dataset"], split=split) + ds = ds.shuffle(seed=42) + max_samples = min(2048, len(ds)) + ds = ds.select( + range(max_samples) + ) # limit the dataset size to make sure the task does not take too long to run + title = ds["title"] + question = ds["question"] + context = ds["context"] + answer = [a["text"][0] for a in ds["answers"]] + + self.corpus = {split: {}} + self.relevant_docs = {split: {}} + self.queries = {split: {}} + + text2id = {} + n = 0 + for t, q, cont, ans in zip(title, question, context, answer): + self.queries[split][str(n)] = q + q_n = n + n += 1 + if cont not in text2id: + text2id[cont] = n + self.corpus[split][str(n)] = {"title": t, "text": cont} + n += 1 + if ans not in text2id: + text2id[ans] = n + self.corpus[split][str(n)] = {"title": t, "text": ans} + n += 1 + + self.relevant_docs[split][str(q_n)] = { + str(text2id[ans]): 1, + str(text2id[cont]): 1, + } # only two correct matches + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kat/GeorgianFAQRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kat/GeorgianFAQRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..6fc72af305c138b4606b9c0fffd40abf3a15b6d5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kat/GeorgianFAQRetrieval.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from datasets import DatasetDict, load_dataset + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_SPLIT = "test" + + +class GeorgianFAQRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="GeorgianFAQRetrieval", + dataset={ + "path": "jupyterjazz/georgian-faq", + "revision": "2436d9bda047a80959b034a572fdda4d00c80d2e", + }, + description=( + "Frequently asked questions (FAQs) and answers mined from Georgian websites via Common Crawl." 
+ ), + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["kat-Geor"], + main_score="ndcg_at_10", + domains=["Web"], + text_creation="created", + n_samples={_EVAL_SPLIT: 2566}, + reference="https://huggingface.co/datasets/jupyterjazz/georgian-faq", + date=("2024-05-02", "2024-05-03"), + form=["written"], + task_subtypes=["Question answering"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + bibtex_citation="", + avg_character_length={_EVAL_SPLIT: 572}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + queries = {_EVAL_SPLIT: {}} + corpus = {_EVAL_SPLIT: {}} + relevant_docs = {_EVAL_SPLIT: {}} + + data = load_dataset( + self.metadata_dict["dataset"]["path"], + split=_EVAL_SPLIT, + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + question_ids = { + question: _id for _id, question in enumerate(set(data["question"])) + } + answer_ids = {answer: _id for _id, answer in enumerate(set(data["answer"]))} + + for row in data: + question = row["question"] + answer = row["answer"] + query_id = f"Q{question_ids[question]}" + queries[_EVAL_SPLIT][query_id] = question + doc_id = f"D{answer_ids[answer]}" + corpus[_EVAL_SPLIT][doc_id] = {"text": answer} + if query_id not in relevant_docs[_EVAL_SPLIT]: + relevant_docs[_EVAL_SPLIT][query_id] = {} + relevant_docs[_EVAL_SPLIT][query_id][doc_id] = 1 + + self.corpus = DatasetDict(corpus) + self.queries = DatasetDict(queries) + self.relevant_docs = DatasetDict(relevant_docs) + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kat/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kor/KoMiracl.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kor/KoMiracl.py new file mode 100644 index 0000000000000000000000000000000000000000..1c9f3e27ca8a6c182ee520c22368e3ad2b99ab86 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kor/KoMiracl.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class KoMiracl(AbsTaskRetrieval): + metadata = TaskMetadata( + name="Ko-miracl", + description="Ko-miracl", + reference=None, + dataset={ + "path": "taeminlee/Ko-miracl", + "revision": "5c7690518e481375551916f24241048cf7b017d0", + }, + type="Retrieval", + category="s2p", + eval_splits=["dev"], + eval_langs=["kor-Hang"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kor/KoStrategyQA.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kor/KoStrategyQA.py new file mode 100644 index 0000000000000000000000000000000000000000..834e2a67a29ce5402395f8e4134ca30f8010816e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kor/KoStrategyQA.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import 
AbsTaskRetrieval + + +class KoStrategyQA(AbsTaskRetrieval): + metadata = TaskMetadata( + name="Ko-StrategyQA", + description="Ko-StrategyQA", + reference=None, + dataset={ + "path": "taeminlee/Ko-StrategyQA", + "revision": "d243889a3eb6654029dbd7e7f9319ae31d58f97c", + }, + type="Retrieval", + category="s2p", + eval_splits=["dev"], + eval_langs=["kor-Hang"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kor/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/kor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/BelebeleRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/BelebeleRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d825168d2c9f80cc3dfd1e4f2a14380284e9d9 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/BelebeleRetrieval.py @@ -0,0 +1,201 @@ +from __future__ import annotations + +from datasets import load_dataset + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_SPLIT = "test" + +_LANGS = { + "acm": ["acm-Arab"], + "afr": ["afr-Latn"], + "als": ["als-Latn"], + "amh": ["amh-Ethi"], + "apc": ["apc-Arab"], + "arb": ["arb-Arab"], + "ars": ["ars-Arab"], + "ary": ["ary-Arab"], + "arz": ["arz-Arab"], + "asm": ["asm-Beng"], + "azj": ["azj-Latn"], + "bam": ["bam-Latn"], + "ben": ["ben-Beng"], + "bod": ["bod-Tibt"], + "bul": ["bul-Cyrl"], + "cat": ["cat-Latn"], + "ceb": ["ceb-Latn"], + "ces": ["ces-Latn"], + "ckb": ["ckb-Arab"], + "dan": ["dan-Latn"], + "deu": ["deu-Latn"], + "ell": ["ell-Grek"], + "eng": ["eng-Latn"], + "est": ["est-Latn"], + "eus": ["eus-Latn"], + "fin": ["fin-Latn"], + "fra": ["fra-Latn"], + "fuv": ["fuv-Latn"], + "gaz": ["gaz-Latn"], + "grn": ["grn-Latn"], + "guj": ["guj-Gujr"], + "hat": ["hat-Latn"], + "hau": ["hau-Latn"], + "heb": ["heb-Hebr"], + "hin": ["hin-Deva"], + "hrv": ["hrv-Latn"], + "hun": ["hun-Latn"], + "hye": ["hye-Armn"], + "ibo": ["ibo-Latn"], + "ilo": ["ilo-Latn"], + "ind": ["ind-Latn"], + "isl": ["isl-Latn"], + "ita": ["ita-Latn"], + "jav": ["jav-Latn"], + "jpn": ["jpn-Jpan"], + "kac": ["kac-Latn"], + "kan": ["kan-Knda"], + "kat": ["kat-Geor"], + "kaz": ["kaz-Cyrl"], + "kea": ["kea-Latn"], + "khk": ["khk-Cyrl"], + "khm": ["khm-Khmr"], + "kin": ["kin-Latn"], + "kir": ["kir-Cyrl"], + "kor": ["kor-Hang"], + "lao": ["lao-Laoo"], + "lin": ["lin-Latn"], + "lit": ["lit-Latn"], + "lug": ["lug-Latn"], + "luo": ["luo-Latn"], + "lvs": ["lvs-Latn"], + "mal": ["mal-Mlym"], + "mar": ["mar-Deva"], + "mkd": ["mkd-Cyrl"], + "mlt": ["mlt-Latn"], + "mri": ["mri-Latn"], + "mya": ["mya-Mymr"], + "nld": ["nld-Latn"], + "nob": ["nob-Latn"], + "npi": ["npi-Deva"], + "nso": ["nso-Latn"], + "nya": ["nya-Latn"], + "ory": ["ory-Orya"], + "pan": ["pan-Guru"], + "pbt": ["pbt-Arab"], + "pes": ["pes-Arab"], + "plt": ["plt-Latn"], + "pol": ["pol-Latn"], + "por": ["por-Latn"], + "ron": ["ron-Latn"], + "rus": ["rus-Cyrl"], + "shn": ["shn-Mymr"], + "sin": ["sin-Latn"], + "slk": ["slk-Latn"], + "slv": 
["slv-Latn"], + "sna": ["sna-Latn"], + "snd": ["snd-Arab"], + "som": ["som-Latn"], + "sot": ["sot-Latn"], + "spa": ["spa-Latn"], + "srp": ["srp-Cyrl"], + "ssw": ["ssw-Latn"], + "sun": ["sun-Latn"], + "swe": ["swe-Latn"], + "swh": ["swh-Latn"], + "tam": ["tam-Taml"], + "tel": ["tel-Telu"], + "tgk": ["tgk-Cyrl"], + "tgl": ["tgl-Latn"], + "tha": ["tha-Thai"], + "tir": ["tir-Ethi"], + "tsn": ["tsn-Latn"], + "tso": ["tso-Latn"], + "tur": ["tur-Latn"], + "ukr": ["ukr-Cyrl"], + "urd": ["urd-Arab"], + "uzn": ["uzn-Latn"], + "vie": ["vie-Latn"], + "war": ["war-Latn"], + "wol": ["wol-Latn"], + "xho": ["xho-Latn"], + "yor": ["yor-Latn"], + "zho": ["zho-Hans"], + "zsm": ["zsm-Latn"], + "zul": ["zul-Latn"], +} + + +class BelebeleRetrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="BelebeleRetrieval", + dataset={ + "path": "facebook/belebele", + "revision": "75b399394a9803252cfec289d103de462763db7c", + }, + description=( + "Belebele is a multiple-choice machine reading comprehension (MRC) dataset spanning 115 distinct languages." + ), + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=_LANGS, + reference="https://arxiv.org/abs/2308.16884", + main_score="ndcg_at_10", + license="CC-BY-SA-4.0", + domains=["Web", "News"], + text_creation="created", + n_samples={_EVAL_SPLIT: 103500}, # number of languages * 900 + date=("2023-08-31", "2023-08-31"), + form=["written"], + task_subtypes=["Question answering"], + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + avg_character_length={_EVAL_SPLIT: 568}, # avg length of query-passage pairs + bibtex_citation="""@article{bandarkar2023belebele, + title={The Belebele Benchmark: a Parallel Reading Comprehension Dataset in 122 Language Variants}, + author={Lucas Bandarkar and Davis Liang and Benjamin Muller and Mikel Artetxe and Satya Narayan Shukla and Donald Husa and Naman Goyal and Abhinandan Krishnan and Luke Zettlemoyer and Madian Khabsa}, + year={2023}, + journal={arXiv preprint arXiv:2308.16884} +}""", + ) + + def load_data(self, **kwargs) -> None: + if self.data_loaded: + return + + self.dataset = load_dataset(**self.metadata_dict["dataset"]) + + self.queries = {lang: {_EVAL_SPLIT: {}} for lang in self.langs} + self.corpus = {lang: {_EVAL_SPLIT: {}} for lang in self.langs} + self.relevant_docs = {lang: {_EVAL_SPLIT: {}} for lang in self.langs} + + for lang in self.langs: + belebele_lang = _LANGS[lang][0].replace("-", "_") + ds = self.dataset[belebele_lang] + + question_ids = { + question: _id for _id, question in enumerate(set(ds["question"])) + } + context_ids = { + passage: _id for _id, passage in enumerate(set(ds["flores_passage"])) + } + + for row in ds: + query = row["question"] + query_id = f"Q{question_ids[query]}" + self.queries[lang][_EVAL_SPLIT][query_id] = query + context = row["flores_passage"] + context_id = f"C{context_ids[context]}" + self.corpus[lang][_EVAL_SPLIT][context_id] = { + "title": "", + "text": context, + } + if query_id not in self.relevant_docs[lang][_EVAL_SPLIT]: + self.relevant_docs[lang][_EVAL_SPLIT][query_id] = {} + self.relevant_docs[lang][_EVAL_SPLIT][query_id][context_id] = 1 + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/CrossLingualSemanticDiscriminationWMT19.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/CrossLingualSemanticDiscriminationWMT19.py new file mode 100644 index 
0000000000000000000000000000000000000000..dee3850d2d1e7a0bbcbd0e00fab41994177256b6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/CrossLingualSemanticDiscriminationWMT19.py @@ -0,0 +1,129 @@ +from typing import Dict, List + +import datasets + +from mteb.abstasks import AbsTaskRetrieval, CrosslingualTask, TaskMetadata + +_LANGUAGES = { + "wmt19.de.fr": ["deu-Latn", "fra-Latn"], + "wmt19.fr.de": ["fra-Latn", "deu-Latn"], +} + + +def _build_lang_pair(langs: List[str]) -> str: + """Builds a language pair separated by a dash. + e.g., ['eng-Latn', 'deu-Latn'] -> 'eng-deu'. + """ + return langs[0].split("-")[0] + "-" + langs[1].split("-")[0] + + +def extend_lang_pairs() -> Dict[str, List[str]]: + eval_langs = {} + for langs in _LANGUAGES.values(): + lang_pair = _build_lang_pair(langs) + eval_langs[lang_pair] = langs + return eval_langs + + +_EVAL_LANGS = extend_lang_pairs() + + +class CrossLingualSemanticDiscriminationWMT19(AbsTaskRetrieval, CrosslingualTask): + metadata = TaskMetadata( + name="CrossLingualSemanticDiscriminationWMT19", + dataset={ + "path": "Andrianos/clsd_wmt19_21", + "revision": "9627fbdb39b827ee5c066011ebe1e947cdb137bd", + }, + description="Evaluate a multilingual embedding model on its ability to discriminate the original parallel pair from challenging distractors, derived from the WMT19 DE-FR test set", + reference="https://huggingface.co/datasets/Andrianos/clsd_wmt19_21", + type="Retrieval", + category="s2s", + eval_splits=["test"], + eval_langs=_EVAL_LANGS, + main_score="recall_at_1", + date=("2018-01-01", "2023-12-12"), + form=["written"], + domains=["News"], + task_subtypes=["Cross-Lingual Semantic Discrimination"], + license="CC BY-SA 4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="LM-generated and verified", + bibtex_citation="preprint_coming", + n_samples={"test": 2946}, + avg_character_length={"test": 161}, + ) + + def __init__(self, **kwargs): + self.num_of_distractors = 4 + super().__init__(**kwargs) + + def load_data(self, **kwargs): + """Generic data loader for the original CLSD datasets, in the format shown at the reference link. + After loading the HF dataset, it populates the following three variables used for retrieval evaluation. + + self.corpus + + self.queries + + self.relevant_docs + + Sets self.data_loaded to True. + """ + if self.data_loaded: + return + queries, corpus, relevant_docs = {}, {}, {} + dataset_raw = {} + for split in self.metadata.eval_splits: + for hf_subset, langs in _LANGUAGES.items(): + lang_pair = _build_lang_pair(langs) + dataset_raw[lang_pair] = datasets.load_dataset( + name=hf_subset, + **self.metadata_dict["dataset"], + )[split] + + queries[lang_pair] = {} + corpus[lang_pair] = {} + relevant_docs[lang_pair] = {} + queries[lang_pair][split] = {} + corpus[lang_pair][split] = {} + relevant_docs[lang_pair][split] = {} + + # Generate unique IDs for queries and documents + query_id_counter = 1 + document_id_counter = 1 + + for row in dataset_raw[lang_pair]: + query_text = row["Source"] + positive_text = [row["Target"]] + negative_texts = [ + row[f"TargetAdv{str(i)}"] + for i in range( + 1, self.num_of_distractors + 1 + ) # Four Distractors. Columns are named TargetAdv1-TargetAdv4 + ] + + # Assign unique ID to the query + query_id = f"Q{query_id_counter}" + queries[lang_pair][split][query_id] = query_text + query_id_counter += 1 + + # Add true parallel and distractors to corpus with unique ids.
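+ # Each query is labelled with exactly one relevant document (its true parallel sentence); + # the distractors enter the shared corpus without relevance labels, so recall_at_1 rewards + # ranking the true pair above every distractor.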
+ for text in positive_text + negative_texts: + doc_id = f"D{document_id_counter}" + corpus[lang_pair][split][doc_id] = {"text": text} + document_id_counter += 1 + + # Add relevant document information to relevant_docs for positive texts only + if text in positive_text: + if query_id not in relevant_docs[lang_pair][split]: + relevant_docs[lang_pair][split][query_id] = {} + relevant_docs[lang_pair][split][query_id][doc_id] = 1 + + self.corpus = datasets.DatasetDict(corpus) + self.queries = datasets.DatasetDict(queries) + self.relevant_docs = datasets.DatasetDict(relevant_docs) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/CrossLingualSemanticDiscriminationWMT21.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/CrossLingualSemanticDiscriminationWMT21.py new file mode 100644 index 0000000000000000000000000000000000000000..cdc3afc6cff5ce945a37a58403c505a8c6a96eee --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/CrossLingualSemanticDiscriminationWMT21.py @@ -0,0 +1,129 @@ +from typing import Dict, List + +import datasets + +from mteb.abstasks import AbsTaskRetrieval, CrosslingualTask, TaskMetadata + +_LANGUAGES = { + "wmt21.de.fr": ["deu-Latn", "fra-Latn"], + "wmt21.fr.de": ["fra-Latn", "deu-Latn"], +} + + +def _build_lang_pair(langs: List[str]) -> str: + """Builds a language pair separated by a dash. + e.g., ['eng-Latn', 'deu-Latn'] -> 'eng-deu'. + """ + return langs[0].split("-")[0] + "-" + langs[1].split("-")[0] + + +def extend_lang_pairs() -> Dict[str, List[str]]: + eval_langs = {} + for langs in _LANGUAGES.values(): + lang_pair = _build_lang_pair(langs) + eval_langs[lang_pair] = langs + return eval_langs + + +_EVAL_LANGS = extend_lang_pairs() + + +class CrossLingualSemanticDiscriminationWMT21(AbsTaskRetrieval, CrosslingualTask): + metadata = TaskMetadata( + name="CrossLingualSemanticDiscriminationWMT21", + dataset={ + "path": "Andrianos/clsd_wmt19_21", + "revision": "9627fbdb39b827ee5c066011ebe1e947cdb137bd", + }, + description="Evaluate a multilingual embedding model on its ability to discriminate the original parallel pair from challenging distractors, derived from the WMT21 DE-FR test set", + reference="https://huggingface.co/datasets/Andrianos/clsd_wmt19_21", + type="Retrieval", + category="s2s", + eval_splits=["test"], + eval_langs=_EVAL_LANGS, + main_score="recall_at_1", + date=("2020-01-01", "2023-12-12"), + form=["written"], + domains=["News"], + task_subtypes=["Cross-Lingual Semantic Discrimination"], + license="CC BY-SA 4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="LM-generated and verified", + bibtex_citation="preprint_coming", + n_samples={"test": 1786}, + avg_character_length={"test": 159}, + ) + + def __init__(self, **kwargs): + self.num_of_distractors = 4 + super().__init__(**kwargs) + + def load_data(self, **kwargs): + """Generic data loader for the original CLSD datasets, in the format shown at the reference link. + After loading the HF dataset, it populates the following three variables used for retrieval evaluation. + + self.corpus + + self.queries + + self.relevant_docs + + Sets self.data_loaded to True.
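+ + Note: each query has exactly one relevant document, its true parallel translation; the four + TargetAdv distractor columns are added to the corpus without relevance labels.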
+ """ + if self.data_loaded: + return + queries, corpus, relevant_docs = {}, {}, {} + dataset_raw = {} + for split in self.metadata.eval_splits: + for hf_subset, langs in _LANGUAGES.items(): + lang_pair = _build_lang_pair(langs) + dataset_raw[lang_pair] = datasets.load_dataset( + name=hf_subset, + **self.metadata_dict["dataset"], + )[split] + + queries[lang_pair] = {} + corpus[lang_pair] = {} + relevant_docs[lang_pair] = {} + queries[lang_pair][split] = {} + corpus[lang_pair][split] = {} + relevant_docs[lang_pair][split] = {} + + # Generate unique IDs for queries and documents + query_id_counter = 1 + document_id_counter = 1 + + for row in dataset_raw[lang_pair]: + query_text = row["Source"] + positive_text = [row["Target"]] + negative_texts = [ + row[f"TargetAdv{str(i)}"] + for i in range( + 1, self.num_of_distractors + 1 + ) # Four Distractors. Columns are named TargetAdv1-TargetAdv4 + ] + + # Assign unique ID to the query + query_id = f"Q{query_id_counter}" + queries[lang_pair][split][query_id] = query_text + query_id_counter += 1 + + # Add true parallel and distractors to corpus with unique id. + for text in positive_text + negative_texts: + doc_id = f"D{document_id_counter}" + corpus[lang_pair][split][doc_id] = {"text": text} + document_id_counter += 1 + + # Add relevant document information to relevant_docs for positive texts only + if text in positive_text: + if query_id not in relevant_docs[lang_pair][split]: + relevant_docs[lang_pair][split][query_id] = {} + relevant_docs[lang_pair][split][query_id][doc_id] = 1 + + self.corpus = datasets.DatasetDict(corpus) + self.queries = datasets.DatasetDict(queries) + self.relevant_docs = datasets.DatasetDict(relevant_docs) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/IndicQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/IndicQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..6daae16c67af4604e7d195ec315b21e86f4cb725 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/IndicQARetrieval.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from hashlib import sha256 + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_LANGUAGES = { + "as": ["asm-Beng"], + "bn": ["ben-Beng"], + "gu": ["guj-Gujr"], + "hi": ["hin-Deva"], + "kn": ["kan-Knda"], + "ml": ["mal-Mlym"], + "mr": ["mar-Deva"], + "or": ["ory-Orya"], + "pa": ["pan-Guru"], + "ta": ["tam-Taml"], + "te": ["tel-Telu"], +} + + +class IndicQARetrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="IndicQARetrieval", + dataset={ + "path": "ai4bharat/IndicQA", + "revision": "570d90ae4f7b64fe4fdd5f42fc9f9279b8c9fd9d", + }, + description="IndicQA is a manually curated cloze-style reading comprehension dataset that can be used for evaluating question-answering models in 11 Indic languages. 
It is repurposed here for retrieving the relevant context for each question.", + reference="https://arxiv.org/abs/2212.05409", + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=_LANGUAGES, + main_score="ndcg_at_10", + date=("2022-08-01", "2022-12-20"), + form=["written"], + domains=["Web"], + task_subtypes=[], + license="CC0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="machine-translated and verified", + bibtex_citation="""@article{doddapaneni2022towards, + title = {Towards Leaving No Indic Language Behind: Building Monolingual Corpora, Benchmark and Models for Indic Languages}, + author = {Sumanth Doddapaneni and Rahul Aralikatte and Gowtham Ramesh and Shreyansh Goyal and Mitesh M. Khapra and Anoop Kunchukuttan and Pratyush Kumar}, + journal = {Annual Meeting of the Association for Computational Linguistics}, + year = {2022}, + doi = {10.18653/v1/2023.acl-long.693} +}""", + n_samples={"test": 18586}, + avg_character_length={"test": 930.6}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + split = "test" + queries = {lang: {split: {}} for lang in self.hf_subsets} + corpus = {lang: {split: {}} for lang in self.hf_subsets} + relevant_docs = {lang: {split: {}} for lang in self.hf_subsets} + + for lang in self.hf_subsets: + data = datasets.load_dataset( + name=f"indicqa.{lang}", **self.metadata_dict["dataset"] + )[split] + data = data.filter(lambda x: x["answers"]["text"] != "") + data = data.select(range(min(self.metadata.n_samples[split], len(data)))) # cap at the number of available rows + + question_ids = { + question: sha256(question.encode("utf-8")).hexdigest() + for question in set(data["question"]) + } + context_ids = { + context: sha256(context.encode("utf-8")).hexdigest() + for context in set(data["context"]) + } + + for row in data: + question = row["question"] + context = row["context"] + query_id = question_ids[question] + queries[lang][split][query_id] = question + + doc_id = context_ids[context] + corpus[lang][split][doc_id] = {"text": context} + if query_id not in relevant_docs[lang][split]: + relevant_docs[lang][split][query_id] = {} + relevant_docs[lang][split][query_id][doc_id] = 1 + + self.corpus = datasets.DatasetDict(corpus) + self.queries = datasets.DatasetDict(queries) + self.relevant_docs = datasets.DatasetDict(relevant_docs) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MIRACLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MIRACLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..872b9ffe7904c6d2b1d3ca041f8c019469d527d7 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MIRACLRetrieval.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_SPLIT = "test" + +_LANGS = {"de": ["deu-Latn"], "es": ["spa-Latn"]} + + +def _load_miracl_data( + path: str, langs: list, split: str, cache_dir: str = None, revision: str = None +): + queries = {lang: {split: {}} for lang in langs} + corpus = {lang: {split: {}} for lang in langs} + relevant_docs = {lang: {split: {}} for lang in langs} + + for lang in langs: + data = datasets.load_dataset( + path, + lang, + split=split, + cache_dir=cache_dir, + revision=revision, + ) + # Generate unique IDs for queries and documents + query_id_counter = 1
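+ # The document counter runs across all queries in the split, so every positive and + # negative passage receives its own D{n} id (duplicate texts are not deduplicated).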
+ document_id_counter = 1 + + for row in data: + query_text = row["query"] + positive_texts = row["positive"] + negative_texts = row["negative"] + + # Assign unique ID to the query + query_id = f"Q{query_id_counter}" + queries[lang][split][query_id] = query_text + query_id_counter += 1 + + # Add positive and negative texts to corpus with unique IDs + for text in positive_texts + negative_texts: + doc_id = f"D{document_id_counter}" + corpus[lang][split][doc_id] = {"text": text} + document_id_counter += 1 + + # Add relevant document information to relevant_docs for positive texts only + if text in positive_texts: + if query_id not in relevant_docs[lang][split]: + relevant_docs[lang][split][query_id] = {} + relevant_docs[lang][split][query_id][doc_id] = 1 + + corpus = datasets.DatasetDict(corpus) + queries = datasets.DatasetDict(queries) + relevant_docs = datasets.DatasetDict(relevant_docs) + + return corpus, queries, relevant_docs + + +class MIRACLRetrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="MIRACLRetrieval", + description="MIRACLRetrieval", + reference=None, + dataset={ + "path": "jinaai/miracl", + "revision": "d28a029f35c4ff7f616df47b0edf54e6882395e6", + }, + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=_LANGS, + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = _load_miracl_data( + path=self.metadata_dict["dataset"]["path"], + langs=self.hf_subsets, + split=self.metadata_dict["eval_splits"][0], + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MLQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MLQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..db78ce8ecd1a3030f5cafde8ea012c89671957c6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MLQARetrieval.py @@ -0,0 +1,162 @@ +from typing import Dict, List + +import datasets + +from mteb.abstasks import AbsTaskRetrieval, CrosslingualTask, TaskMetadata + +_LANGUAGES = { + "mlqa.ar.ar": ["ara-Arab", "ara-Arab"], + "mlqa.ar.de": ["ara-Arab", "deu-Latn"], + "mlqa.ar.en": ["ara-Arab", "eng-Latn"], + "mlqa.ar.es": ["ara-Arab", "spa-Latn"], + "mlqa.ar.hi": ["ara-Arab", "hin-Deva"], + "mlqa.ar.vi": ["ara-Arab", "vie-Latn"], + "mlqa.ar.zh": ["ara-Arab", "zho-Hans"], + "mlqa.de.ar": ["deu-Latn", "ara-Arab"], + "mlqa.de.de": ["deu-Latn", "deu-Latn"], + "mlqa.de.en": ["deu-Latn", "eng-Latn"], + "mlqa.de.es": ["deu-Latn", "spa-Latn"], + "mlqa.de.hi": ["deu-Latn", "hin-Deva"], + "mlqa.de.vi": ["deu-Latn", "vie-Latn"], + "mlqa.de.zh": ["deu-Latn", "zho-Hans"], + "mlqa.en.ar": ["eng-Latn", "ara-Arab"], + "mlqa.en.de": ["eng-Latn", "deu-Latn"], + "mlqa.en.en": ["eng-Latn", "eng-Latn"], + "mlqa.en.es": ["eng-Latn", "spa-Latn"], + "mlqa.en.hi": ["eng-Latn", "hin-Deva"], + "mlqa.en.vi": ["eng-Latn", "vie-Latn"], + "mlqa.en.zh": ["eng-Latn", "zho-Hans"], + "mlqa.es.ar": ["spa-Latn", "ara-Arab"], + "mlqa.es.de": ["spa-Latn", "deu-Latn"], + "mlqa.es.en": ["spa-Latn", "eng-Latn"], + "mlqa.es.es": 
["spa-Latn", "spa-Latn"], + "mlqa.es.hi": ["spa-Latn", "hin-Deva"], + "mlqa.es.vi": ["spa-Latn", "vie-Latn"], + "mlqa.es.zh": ["spa-Latn", "zho-Hans"], + "mlqa.hi.ar": ["hin-Deva", "ara-Arab"], + "mlqa.hi.de": ["hin-Deva", "deu-Latn"], + "mlqa.hi.en": ["hin-Deva", "eng-Latn"], + "mlqa.hi.es": ["hin-Deva", "spa-Latn"], + "mlqa.hi.hi": ["hin-Deva", "hin-Deva"], + "mlqa.hi.vi": ["hin-Deva", "vie-Latn"], + "mlqa.hi.zh": ["hin-Deva", "zho-Hans"], + "mlqa.vi.ar": ["vie-Latn", "ara-Arab"], + "mlqa.vi.de": ["vie-Latn", "deu-Latn"], + "mlqa.vi.en": ["vie-Latn", "eng-Latn"], + "mlqa.vi.es": ["vie-Latn", "spa-Latn"], + "mlqa.vi.hi": ["vie-Latn", "hin-Deva"], + "mlqa.vi.vi": ["vie-Latn", "vie-Latn"], + "mlqa.vi.zh": ["vie-Latn", "zho-Hans"], + "mlqa.zh.ar": ["zho-Hans", "ara-Arab"], + "mlqa.zh.de": ["zho-Hans", "deu-Latn"], + "mlqa.zh.en": ["zho-Hans", "eng-Latn"], + "mlqa.zh.es": ["zho-Hans", "spa-Latn"], + "mlqa.zh.hi": ["zho-Hans", "hin-Deva"], + "mlqa.zh.vi": ["zho-Hans", "vie-Latn"], + "mlqa.zh.zh": ["zho-Hans", "zho-Hans"], +} + + +def _build_lang_pair(langs: List[str]) -> str: + """Builds a language pair separated by a dash. + e.g., ['eng-Latn', 'deu-Latn'] -> 'eng-deu'. + """ + return langs[0].split("-")[0] + "-" + langs[1].split("-")[0] + + +def extend_lang_pairs() -> Dict[str, List[str]]: + eval_langs = {} + for langs in _LANGUAGES.values(): + lang_pair = _build_lang_pair(langs) + eval_langs[lang_pair] = langs + return eval_langs + + +_EVAL_LANGS = extend_lang_pairs() + + +class MLQARetrieval(AbsTaskRetrieval, CrosslingualTask): + metadata = TaskMetadata( + name="MLQARetrieval", + description="""MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance. + MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic, + German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between + 4 different languages on average.""", + reference="https://huggingface.co/datasets/mlqa", + dataset={ + "path": "facebook/mlqa", + "revision": "397ed406c1a7902140303e7faf60fff35b58d285", + }, + type="Retrieval", + category="s2p", + eval_splits=["validation", "test"], + eval_langs=_EVAL_LANGS, + main_score="ndcg_at_10", + date=("2019-01-01", "2020-12-31"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Question answering"], + license="cc-by-sa-3.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@article{lewis2019mlqa, + title = {MLQA: Evaluating Cross-lingual Extractive Question Answering}, + author = {Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger}, + journal = {arXiv preprint arXiv:1910.07475}, + year = 2019, + eid = {arXiv: 1910.07475} + }""", + n_samples={"test": 158083, "validation": 15747}, + avg_character_length={ + "test": 37352.28, + "validation": 36952.7, + }, # avergae context lengths + ) + + def load_data(self, **kwargs): + """In this retrieval datasets, corpus is in lang XX and queries in lang YY.""" + if self.data_loaded: + return + + _dataset_raw = {} + self.queries, self.corpus, self.relevant_docs = {}, {}, {} + + for hf_subset, langs in _LANGUAGES.items(): + # Builds a language pair separated by an underscore. e.g., "ara-Arab_eng-Latn". 
+ # Corpus is in ara-Arab and queries in eng-Latn + lang_pair = _build_lang_pair(langs) + + _dataset_raw[lang_pair] = datasets.load_dataset( + name=hf_subset, + **self.metadata_dict["dataset"], + ) + _dataset_raw[lang_pair] = _dataset_raw[lang_pair].rename_column( + "context", "text" + ) + + self.queries[lang_pair] = { + eval_split: { + str(i): q["question"] + for i, q in enumerate(_dataset_raw[lang_pair][eval_split]) + } + for eval_split in self.metadata_dict["eval_splits"] + } + + self.corpus[lang_pair] = { + eval_split: { + str(row["id"]): row for row in _dataset_raw[lang_pair][eval_split] + } + for eval_split in self.metadata_dict["eval_splits"] + } + + self.relevant_docs[lang_pair] = { + eval_split: { + str(i): {str(q["id"]): 1} + for i, q in enumerate(_dataset_raw[lang_pair][eval_split]) + } + for eval_split in self.metadata_dict["eval_splits"] + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MintakaRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MintakaRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..ce269f89abe39a0220c36797a0c0ab31bdea2a89 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MintakaRetrieval.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_SPLIT = "test" +_LANGS = { + "ar": ["ara-Arab"], + "de": ["deu-Latn"], + "es": ["spa-Latn"], + "fr": ["fra-Latn"], + "hi": ["hin-Deva"], + "it": ["ita-Latn"], + "ja": ["jpn-Hira"], + "pt": ["por-Latn"], +} + + +def _load_mintaka_data( + path: str, langs: list, split: str, cache_dir: str = None, revision: str = None +): + queries = {lang: {split: {}} for lang in langs} + corpus = {lang: {split: {}} for lang in langs} + relevant_docs = {lang: {split: {}} for lang in langs} + + for lang in langs: + data = datasets.load_dataset( + path, + lang, + split=split, + cache_dir=cache_dir, + revision=revision, + ) + question_ids = { + question: _id for _id, question in enumerate(set(data["question"])) + } + answer_ids = {answer: _id for _id, answer in enumerate(set(data["answer"]))} + + for row in data: + question = row["question"] + answer = row["answer"] + query_id = f"Q{question_ids[question]}" + queries[lang][split][query_id] = question + doc_id = f"D{answer_ids[answer]}" + corpus[lang][split][doc_id] = {"text": answer} + if query_id not in relevant_docs[lang][split]: + relevant_docs[lang][split][query_id] = {} + relevant_docs[lang][split][query_id][doc_id] = 1 + + corpus = datasets.DatasetDict(corpus) + queries = datasets.DatasetDict(queries) + relevant_docs = datasets.DatasetDict(relevant_docs) + + return corpus, queries, relevant_docs + + +class MintakaRetrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="MintakaRetrieval", + description="MintakaRetrieval", + reference=None, + dataset={ + "path": "jinaai/mintakaqa", + "revision": "efa78cc2f74bbcd21eff2261f9e13aebe40b814e", + }, + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=_LANGS, + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def 
load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = _load_mintaka_data( + path=self.metadata_dict["dataset"]["path"], + langs=self.metadata.eval_langs, + split=self.metadata_dict["eval_splits"][0], + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MultiLongDocRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MultiLongDocRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..cd7a1dc27f1876fcf1997794c4b670cd39a46e52 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/MultiLongDocRetrieval.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskRetrieval, MultilingualTask +from ....abstasks.AbsTaskRetrieval import * + +_LANGUAGES = { + "ar": ["ara-Arab"], + "de": ["deu-Latn"], + "en": ["eng-Latn"], + "es": ["spa-Latn"], + "fr": ["fra-Latn"], + "hi": ["hin-Deva"], + "it": ["ita-Latn"], + "ja": ["jpn-Jpan"], + "ko": ["kor-Hang"], + "pt": ["por-Latn"], + "ru": ["rus-Cyrl"], + "th": ["tha-Thai"], + "zh": ["cmn-Hans"], +} + + +def load_mldr_data( + path: str, + langs: list, + eval_splits: list, + cache_dir: str = None, + revision: str = None, +): + corpus = {lang: {split: None for split in eval_splits} for lang in langs} + queries = {lang: {split: None for split in eval_splits} for lang in langs} + relevant_docs = {lang: {split: None for split in eval_splits} for lang in langs} + + for lang in langs: + lang_corpus = datasets.load_dataset( + path, f"corpus-{lang}", cache_dir=cache_dir, revision=revision + )["corpus"] + lang_corpus = {e["docid"]: {"text": e["text"]} for e in lang_corpus} + lang_data = datasets.load_dataset( + path, lang, cache_dir=cache_dir, revision=revision + ) + for split in eval_splits: + corpus[lang][split] = lang_corpus + queries[lang][split] = {e["query_id"]: e["query"] for e in lang_data[split]} + relevant_docs[lang][split] = { + e["query_id"]: {e["positive_passages"][0]["docid"]: 1} + for e in lang_data[split] + } + + corpus = datasets.DatasetDict(corpus) + queries = datasets.DatasetDict(queries) + relevant_docs = datasets.DatasetDict(relevant_docs) + return corpus, queries, relevant_docs + + +class MultiLongDocRetrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="MultiLongDocRetrieval", + description="MultiLongDocRetrieval", + reference="https://arxiv.org/abs/2402.03216", + dataset={ + "path": "Shitao/MLDR", + "revision": "d67138e705d963e346253a80e59676ddb418810a", + }, + type="Retrieval", + category="s2p", + eval_splits=["dev", "test"], + eval_langs=_LANGUAGES, + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation="""@misc{bge-m3, + title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation}, + author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu}, + year={2024}, + eprint={2402.03216}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +""", + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus,
self.queries, self.relevant_docs = load_mldr_data( + path=self.metadata_dict["dataset"]["path"], + langs=self.metadata.eval_langs, + eval_splits=self.metadata_dict["eval_splits"], + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/NeuCLIR2022Retrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/NeuCLIR2022Retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..5d60ba39dfb8733897377f116c92ba598b54d400 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/NeuCLIR2022Retrieval.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +from collections import defaultdict + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskRetrieval, MultilingualTask +from ....abstasks.AbsTaskRetrieval import * + +_LANGUAGES = { + "fas": ["fas-Arab"], + "rus": ["rus-Cyrl"], + "zho": ["zho-Hans"], +} + + +def load_neuclir_data( + path: str, + langs: list, + eval_splits: list, + cache_dir: str | None = None, + revision: str | None = None, +): + corpus = {lang: {split: None for split in eval_splits} for lang in langs} + queries = {lang: {split: None for split in eval_splits} for lang in langs} + relevant_docs = {lang: {split: None for split in eval_splits} for lang in langs} + + for lang in langs: + lang_corpus = datasets.load_dataset( + path, f"corpus-{lang}", cache_dir=cache_dir, revision=revision + )["corpus"] + lang_queries = datasets.load_dataset( + path, f"queries-{lang}", cache_dir=cache_dir, revision=revision + )["queries"] + lang_qrels = datasets.load_dataset( + path, f"{lang}", cache_dir=cache_dir, revision=revision + )["test"] + corpus[lang] = { + "test": { + str(e["_id"]): {"text": e["text"], "title": e["title"]} + for e in lang_corpus + } + } + queries[lang] = {"test": {str(e["_id"]): e["text"] for e in lang_queries}} + relevant_docs[lang]["test"] = defaultdict(dict) + for item in lang_qrels: + relevant_docs[lang]["test"][str(item["query-id"])].update( + {str(item["corpus-id"]): item["score"]} + ) + + corpus = datasets.DatasetDict(corpus) + queries = datasets.DatasetDict(queries) + relevant_docs = datasets.DatasetDict(relevant_docs) + return corpus, queries, relevant_docs + + +class NeuCLIR2022Retrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="NeuCLIR2022Retrieval", + description="The task involves identifying and retrieving the documents that are relevant to the queries.", + reference="https://neuclir.github.io/", + dataset={ + "path": "mteb/neuclir-2022", + "revision": "920fc15b81e2324e52163904be663f340235cdea", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=_LANGUAGES, + main_score="ndcg_at_20", + date=("2021-08-01", "2022-06-30"), + form=["written"], + domains=["News"], + task_subtypes=[], + license="odc-by", + socioeconomic_status="medium", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@article{lawrie2023overview, + title={Overview of the TREC 2022 NeuCLIR track}, + author={Lawrie, Dawn and MacAvaney, Sean and Mayfield, James and McNamee, Paul and Oard, Douglas W and Soldaini, Luca and Yang, Eugene}, + journal={arXiv preprint arXiv:2304.12367}, + year={2023} +}""", + n_samples={"fas": 2232130, "zho": 3179323, "rus": 4627657}, + avg_character_length={ + "fas": 
3500.5143969099317, + "zho": 2543.1140667919617, + "rus": 3214.755239654659, + }, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = load_neuclir_data( + path=self.metadata_dict["dataset"]["path"], + langs=self.metadata.eval_langs, + eval_splits=self.metadata_dict["eval_splits"], + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/NeuCLIR2023Retrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/NeuCLIR2023Retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..5f578cb98e55791a9d48da0d2f41597c97e741bd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/NeuCLIR2023Retrieval.py @@ -0,0 +1,108 @@ +from __future__ import annotations + +from collections import defaultdict + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import AbsTaskRetrieval, MultilingualTask +from ....abstasks.AbsTaskRetrieval import * + +_LANGUAGES = { + "fas": ["fas-Arab"], + "rus": ["rus-Cyrl"], + "zho": ["zho-Hans"], +} + + +def load_neuclir_data( + path: str, + langs: list, + eval_splits: list, + cache_dir: str | None = None, + revision: str | None = None, +): + corpus = {lang: {split: None for split in eval_splits} for lang in langs} + queries = {lang: {split: None for split in eval_splits} for lang in langs} + relevant_docs = {lang: {split: None for split in eval_splits} for lang in langs} + + for lang in langs: + lang_corpus = datasets.load_dataset( + path, f"corpus-{lang}", cache_dir=cache_dir, revision=revision + )["corpus"] + lang_queries = datasets.load_dataset( + path, f"queries-{lang}", cache_dir=cache_dir, revision=revision + )["queries"] + lang_qrels = datasets.load_dataset( + path, f"{lang}", cache_dir=cache_dir, revision=revision + )["test"] + corpus[lang] = { + "test": { + str(e["_id"]): {"text": e["text"], "title": e["title"]} + for e in lang_corpus + } + } + queries[lang] = {"test": {str(e["_id"]): e["text"] for e in lang_queries}} + relevant_docs[lang]["test"] = defaultdict(dict) + for item in lang_qrels: + relevant_docs[lang]["test"][str(item["query-id"])].update( + {str(item["corpus-id"]): item["score"]} + ) + corpus = datasets.DatasetDict(corpus) + queries = datasets.DatasetDict(queries) + relevant_docs = datasets.DatasetDict(relevant_docs) + return corpus, queries, relevant_docs + + +class NeuCLIR2023Retrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="NeuCLIR2023Retrieval", + description="The task involves identifying and retrieving the documents that are relevant to the queries.", + reference="https://neuclir.github.io/", + dataset={ + "path": "mteb/neuclir-2023", + "revision": "dfad7cc7fe4064d6568d6b7d43b99e3a0246d29b", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=_LANGUAGES, + main_score="ndcg_at_20", + date=("2022-08-01", "2023-06-30"), + form=["written"], + domains=["News"], + task_subtypes=[], + license="odc-by", + socioeconomic_status="medium", + annotations_creators="expert-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@misc{lawrie2024overview, + title={Overview of the TREC 2023 NeuCLIR Track}, + author={Dawn Lawrie and Sean MacAvaney and James Mayfield and Paul McNamee and Douglas W. 
Oard and Luca Soldaini and Eugene Yang}, + year={2024}, + eprint={2404.08071}, + archivePrefix={arXiv}, + primaryClass={cs.IR} +}""", + n_samples={"fas": 2232092, "zho": 3179285, "rus": 4627619}, + avg_character_length={ + "fas": 3579.508213937439, + "zho": 2704.44834488453, + "rus": 3466.8192213553616, + }, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = load_neuclir_data( + path=self.metadata_dict["dataset"]["path"], + langs=self.metadata.eval_langs, + eval_splits=self.metadata_dict["eval_splits"], + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/PublicHealthQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/PublicHealthQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d394a88d13c76ca2a08fd7078af3d9afe9b3984e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/PublicHealthQARetrieval.py @@ -0,0 +1,112 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_SPLIT = "test" + +_LANGS = { + # - + "arabic": ["ara-Arab"], + "chinese": ["zho-Hans"], + "english": ["eng-Latn"], + "french": ["fra-Latn"], + "korean": ["kor-Hang"], + "russian": ["rus-Cyrl"], + "spanish": ["spa-Latn"], + "vietnamese": ["vie-Latn"], +} + + +def _load_publichealthqa_data( + path: str, langs: list, split: str, cache_dir: str = None, revision: str = None +): + queries = {lang: {split: {}} for lang in langs} + corpus = {lang: {split: {}} for lang in langs} + relevant_docs = {lang: {split: {}} for lang in langs} + + for lang in langs: + data = datasets.load_dataset( + path, + lang, + split=split, + cache_dir=cache_dir, + revision=revision, + ) + question_ids = { + question: _id for _id, question in enumerate(set(data["question"])) + } + answer_ids = {answer: _id for _id, answer in enumerate(set(data["answer"]))} + + for row in data: + question = row["question"] + answer = row["answer"] + query_id = f"Q{question_ids[question]}" + queries[lang][split][query_id] = question + doc_id = f"D{answer_ids[answer]}" + corpus[lang][split][doc_id] = {"text": answer} + if query_id not in relevant_docs[lang][split]: + relevant_docs[lang][split][query_id] = {} + relevant_docs[lang][split][query_id][doc_id] = 1 + + corpus = datasets.DatasetDict(corpus) + queries = datasets.DatasetDict(queries) + relevant_docs = datasets.DatasetDict(relevant_docs) + + return corpus, queries, relevant_docs + + +class PublicHealthQARetrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="PublicHealthQA", + description="A multilingual dataset for public health question answering, based on FAQ sourced from CDC and WHO.", + dataset={ + "path": "xhluca/publichealth-qa", + "revision": "main", + }, + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=_LANGS, + main_score="ndcg_at_10", + reference="https://huggingface.co/datasets/xhluca/publichealth-qa", + date=("2020-01-01", "2020-04-15"), + form=["written"], + domains=["Medical", "Government", "Web"], + task_subtypes=["Question answering"], + license="CC BY-NC-SA 3.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + 
text_creation="found", + bibtex_citation=""" +@misc {xing_han_lu_2024, + author = { {Xing Han Lu} }, + title = { publichealth-qa (Revision 3b67b6b) }, + year = 2024, + url = { https://huggingface.co/datasets/xhluca/publichealth-qa }, + doi = { 10.57967/hf/2247 }, + publisher = { Hugging Face } +} +""", + n_samples={"test": 888}, + avg_character_length={"test": 778.1655}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = _load_publichealthqa_data( + path=self.metadata_dict["dataset"]["path"], + langs=self.hf_subsets, + split=self.metadata_dict["eval_splits"][0], + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/StatcanDialogueDatasetRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/StatcanDialogueDatasetRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..253aff0352df497f73084d5500141c9fb8cd7d03 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/StatcanDialogueDatasetRetrieval.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +import json + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_SPLITS = ["dev", "test"] + +_LANGS = { + # - + "english": ["eng-Latn"], + "french": ["fra-Latn"], +} + + +def _load_statcan_data( + path: str, langs: list, splits: str, cache_dir: str = None, revision: str = None +): + queries = {lang: {split: {} for split in splits} for lang in langs} + corpus = {lang: {split: {} for split in splits} for lang in langs} + relevant_docs = {lang: {split: {} for split in splits} for lang in langs} + + for split in splits: + for lang in langs: + query_table = datasets.load_dataset( + path, + f"queries_{lang}", + split=split, + cache_dir=cache_dir, + revision=revision, + ) + corpus_table = datasets.load_dataset( + path, + "corpus", + split=lang, + cache_dir=cache_dir, + revision=revision, + ) + + for row in query_table: + query = json.loads(row["query"]) + query_id = row["query_id"] + doc_id = row["doc_id"] + queries[lang][split][query_id] = query + if query_id not in relevant_docs[lang][split]: + relevant_docs[lang][split][query_id] = {} + relevant_docs[lang][split][query_id][doc_id] = 1 + + for row in corpus_table: + doc_id = row["doc_id"] + doc_content = row["doc"] + corpus[lang][split][doc_id] = {"text": doc_content} + + corpus = datasets.DatasetDict(corpus) + queries = datasets.DatasetDict(queries) + relevant_docs = datasets.DatasetDict(relevant_docs) + + return corpus, queries, relevant_docs + + +class StatcanDialogueDatasetRetrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="StatcanDialogueDatasetRetrieval", + description="A Dataset for Retrieving Data Tables through Conversations with Genuine Intents, available in English and French.", + dataset={ + "path": "McGill-NLP/statcan-dialogue-dataset-retrieval", + "revision": "7a26938c93e99e0759a1df416896bb72527e2f33", + }, + type="Retrieval", + category="s2p", + eval_splits=_EVAL_SPLITS, + eval_langs=_LANGS, + main_score="recall_at_10", + reference="https://mcgill-nlp.github.io/statcan-dialogue-dataset/", + date=("2020-01-01", "2020-04-15"), + form=["written"], + domains=["Government", "Web"], + task_subtypes=["Conversational 
retrieval"], + license="https://huggingface.co/datasets/McGill-NLP/statcan-dialogue-dataset-retrieval/blob/main/LICENSE.md", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" +@inproceedings{lu-etal-2023-statcan, + title = "The {S}tat{C}an Dialogue Dataset: Retrieving Data Tables through Conversations with Genuine Intents", + author = "Lu, Xing Han and + Reddy, Siva and + de Vries, Harm", + booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics", + month = may, + year = "2023", + address = "Dubrovnik, Croatia", + publisher = "Association for Computational Linguistics", + url = "https://arxiv.org/abs/2304.01412", + pages = "2799--2829", +} +""", + n_samples={"dev": 1000, "test": 1011, "corpus": 5907}, + avg_character_length={"dev": 776.58, "test": 857.13, "corpus": 6806.97}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = _load_statcan_data( + path=self.metadata_dict["dataset"]["path"], + langs=list(_LANGS.keys()), + splits=self.metadata_dict["eval_splits"], + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/WikipediaRetrievalMultilingual.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/WikipediaRetrievalMultilingual.py new file mode 100644 index 0000000000000000000000000000000000000000..c08679550df328aa51b3d8dd5fd32325460255ea --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/WikipediaRetrievalMultilingual.py @@ -0,0 +1,153 @@ +from __future__ import annotations + +from datasets import load_dataset + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_LANGS = { + "bg": ["bul-Cyrl"], + "bn": ["ben-Beng"], + "cs": ["ces-Latn"], + "da": ["dan-Latn"], + "de": ["deu-Latn"], + "en": ["eng-Latn"], + "fa": ["fas-Arab"], + "fi": ["fin-Latn"], + "hi": ["hin-Deva"], + "it": ["ita-Latn"], + "nl": ["nld-Latn"], + "pt": ["por-Latn"], + "ro": ["ron-Latn"], + "sr": ["srp-Cyrl"], + "no": ["nor-Latn"], + "sv": ["swe-Latn"], +} + + +# adapted from MIRACLRetrieval +def _load_data( + path: str, + langs: list, + split: str, + cache_dir: str = None, + revision_queries: str = None, + revision_corpus: str = None, + revision_qrels: str = None, +): + queries = {lang: {split: {}} for lang in langs} + corpus = {lang: {split: {}} for lang in langs} + qrels = {lang: {split: {}} for lang in langs} + + for lang in langs: + queries_path = path + corpus_path = path.replace("queries", "corpus") + qrels_path = path.replace("queries", "qrels") + queries_lang = load_dataset( + queries_path, + lang, + split=split, + cache_dir=cache_dir, + revision=revision_queries, + ) + corpus_lang = load_dataset( + corpus_path, + lang, + split=split, + cache_dir=cache_dir, + revision=revision_corpus, + ) + qrels_lang = load_dataset( + qrels_path, + lang, + split=split, + cache_dir=cache_dir, + revision=revision_qrels, + ) + # don't pass on titles to make task harder + corpus_lang_dict = {doc["_id"]: {"text": doc["text"]} for doc in corpus_lang} + queries_lang_dict = { + query["_id"]: {"text": query["text"]} for query in queries_lang + } + # qrels_lang_dict = {qrel["query-id"]: {qrel["corpus-id"]: 
qrel["score"]} for qrel in qrels_lang} + + qrels_lang_dict = {} + for qrel in qrels_lang: + if qrel["score"] == 0.5: + continue + # score = 0 if qrel["score"] == 0.5 else qrel["score"] + # score = int(score) + score = int(qrel["score"]) + qrels_lang_dict[qrel["query-id"]] = {qrel["corpus-id"]: score} + + corpus[lang][split] = corpus_lang_dict + queries[lang][split] = queries_lang_dict + qrels[lang][split] = qrels_lang_dict + + return corpus, queries, qrels + + +class WikipediaRetrievalMultilingual(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="WikipediaRetrievalMultilingual", + description="The dataset is derived from Cohere's wikipedia-2023-11 dataset and contains synthetically generated queries.", + reference="https://huggingface.co/datasets/ellamind/wikipedia-2023-11-retrieval-pt", + dataset={ + "path": "ellamind/wikipedia-2023-11-retrieval-multilingual-queries", + "revision": "3b6ea595c94bac3448a2ad167ca2e06abd340d6e", # avoid validation error + "revision_corpus": "f20ac0c449c85358d3d5c72a95f92f1eddc98aa5", + "revision_qrels": "ec88a7bb2da034d538e98e3122d2c98530ca1c8d", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=_EVAL_LANGS, + main_score="ndcg_at_10", + date=("2023-11-01", "2024-05-15"), + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Question answering", "Article retrieval"], + license="cc-by-sa-3.0", + socioeconomic_status="mixed", + annotations_creators="LM-generated", + dialect=[], + text_creation="LM-generated and verified", + bibtex_citation="", + n_samples={ + "en": 1500, + "de": 1500, + "it": 1500, + "pt": 1500, + "nl": 1500, + "cs": 1500, + "ro": 1500, + "bg": 1500, + "sr": 1500, + "fi": 1500, + "da": 1500, + "fa": 1500, + "hi": 1500, + "bn": 1500, + "no": 1500, + "sv": 1500, + }, + avg_character_length={"test": 452}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = _load_data( + path=self.metadata_dict["dataset"]["path"], + langs=self.hf_subsets, + split=self.metadata_dict["eval_splits"][0], + cache_dir=kwargs.get("cache_dir", None), + revision_queries=self.metadata_dict["dataset"]["revision"], + revision_corpus=self.metadata_dict["dataset"]["revision_corpus"], + revision_qrels=self.metadata_dict["dataset"]["revision_qrels"], + ) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XMarketRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XMarketRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e8e9eda83549ffee9d65c4e8e030ac17047f58 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XMarketRetrieval.py @@ -0,0 +1,104 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_EVAL_SPLIT = "test" + +_EVAL_LANGS = { + "de": ["deu-Latn"], + "en": ["eng-Latn"], + "es": ["spa-Latn"], +} + + +def _load_xmarket_data( + path: str, langs: list, split: str, cache_dir: str = None, revision: str = None +): + corpus = {lang: {split: None} for lang in langs} + queries = {lang: {split: None} for lang in langs} + relevant_docs = {lang: {split: None} for lang in langs} + + for lang in langs: + corpus_rows = datasets.load_dataset( + path, + f"corpus-{lang}", + languages=[lang], + split=split, + cache_dir=cache_dir, + ) + 
query_rows = datasets.load_dataset( + path, + f"queries-{lang}", + languages=[lang], + revision=revision, + split=split, + cache_dir=cache_dir, + ) + qrels_rows = datasets.load_dataset( + path, + f"qrels-{lang}", + languages=[lang], + revision=revision, + split=split, + cache_dir=cache_dir, + ) + + corpus[lang][split] = {row["_id"]: row for row in corpus_rows} + queries[lang][split] = {row["_id"]: row["text"] for row in query_rows} + relevant_docs[lang][split] = { + row["_id"]: {v: 1 for v in row["text"].split(" ")} for row in qrels_rows + } + + corpus = datasets.DatasetDict(corpus) + queries = datasets.DatasetDict(queries) + relevant_docs = datasets.DatasetDict(relevant_docs) + + return corpus, queries, relevant_docs + + +class XMarket(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="XMarket", + description="XMarket", + reference=None, + dataset={ + "path": "jinaai/xmarket_ml", + "revision": "dfe57acff5b62c23732a7b7d3e3fb84ff501708b", + }, + type="Retrieval", + category="s2p", + eval_splits=[_EVAL_SPLIT], + eval_langs=_EVAL_LANGS, + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + self.corpus, self.queries, self.relevant_docs = _load_xmarket_data( + path=self.metadata_dict["dataset"]["path"], + langs=self.metadata.eval_langs, + split=self.metadata_dict["eval_splits"][0], + cache_dir=kwargs.get("cache_dir", None), + revision=self.metadata_dict["dataset"]["revision"], + ) + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XPQARetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XPQARetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..f87f744d84520b537aa5d1fd4d356922885eeca6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XPQARetrieval.py @@ -0,0 +1,177 @@ +from typing import List + +import datasets + +from mteb.abstasks import AbsTaskRetrieval, CrosslingualTask, TaskMetadata + +_EVAL_LANGS = { + "ara-ara": ["ara-Arab", "ara-Arab"], + "eng-ara": ["eng-Latn", "ara-Arab"], + "ara-eng": ["ara-Arab", "eng-Latn"], + "deu-deu": ["deu-Latn", "deu-Latn"], + "eng-deu": ["eng-Latn", "deu-Latn"], + "deu-eng": ["deu-Latn", "eng-Latn"], + "spa-spa": ["spa-Latn", "spa-Latn"], + "eng-spa": ["eng-Latn", "spa-Latn"], + "spa-eng": ["spa-Latn", "eng-Latn"], + "fra-fra": ["fra-Latn", "fra-Latn"], + "eng-fra": ["eng-Latn", "fra-Latn"], + "fra-eng": ["fra-Latn", "eng-Latn"], + "hin-hin": ["hin-Deva", "hin-Deva"], + "eng-hin": ["eng-Latn", "hin-Deva"], + "hin-eng": ["hin-Deva", "eng-Latn"], + "ita-ita": ["ita-Latn", "ita-Latn"], + "eng-ita": ["eng-Latn", "ita-Latn"], + "ita-eng": ["ita-Latn", "eng-Latn"], + "jpn-jpn": ["jpn-Hira", "jpn-Hira"], + "eng-jpn": ["eng-Latn", "jpn-Hira"], + "jpn-eng": ["jpn-Hira", "eng-Latn"], + "kor-kor": ["kor-Hang", "kor-Hang"], + "eng-kor": ["eng-Latn", "kor-Hang"], + "kor-eng": ["kor-Hang", "eng-Latn"], + "pol-pol": ["pol-Latn", "pol-Latn"], + "eng-pol": ["eng-Latn", "pol-Latn"], + "pol-eng": ["pol-Latn", "eng-Latn"], + "por-por": ["por-Latn", "por-Latn"], + "eng-por": ["eng-Latn", "por-Latn"], + "por-eng": ["por-Latn", "eng-Latn"], + "tam-tam": ["tam-Taml", "tam-Taml"], + "eng-tam": ["eng-Latn", "tam-Taml"], + 
"tam-eng": ["tam-Taml", "eng-Latn"], + "cmn-cmn": ["cmn-Hans", "cmn-Hans"], + "eng-cmn": ["eng-Latn", "cmn-Hans"], + "cmn-eng": ["cmn-Hans", "eng-Latn"], +} + +_LANG_CONVERSION = { + "ara": "ar", + "deu": "de", + "spa": "es", + "fra": "fr", + "hin": "hi", + "ita": "it", + "jpn": "ja", + "kor": "ko", + "pol": "pl", + "por": "pt", + "tam": "ta", + "cmn": "zh", + "eng": "en", +} + + +class XPQARetrieval(AbsTaskRetrieval, CrosslingualTask): + metadata = TaskMetadata( + name="XPQARetrieval", + description="XPQARetrieval", + reference="https://arxiv.org/abs/2305.09249", + dataset={ + "path": "jinaai/xpqa", + "revision": "c99d599f0a6ab9b85b065da6f9d94f9cf731679f", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=_EVAL_LANGS, + main_score="ndcg_at_10", + date=("2022-01-01", "2023-07-31"), # best guess + form=["written"], + domains=["Reviews"], + task_subtypes=["Question answering"], + license="CDLA-Sharing-1.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{shen2023xpqa, + title={xPQA: Cross-Lingual Product Question Answering in 12 Languages}, + author={Shen, Xiaoyu and Asai, Akari and Byrne, Bill and De Gispert, Adria}, + booktitle={Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 5: Industry Track)}, + pages={103--115}, + year={2023} + }""", + n_samples={"test": 19801}, + avg_character_length={"test": 104.68}, # answer + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + path = self.metadata_dict["dataset"]["path"] + revision = self.metadata_dict["dataset"]["revision"] + eval_splits = self.metadata_dict["eval_splits"] + dataset = _load_dataset_csv(path, revision, eval_splits) + + self.queries, self.corpus, self.relevant_docs = {}, {}, {} + for lang_pair, _ in self.metadata.eval_langs.items(): + lang_corpus, lang_question = ( + lang_pair.split("-")[0], + lang_pair.split("-")[1], + ) + lang_not_english = lang_corpus if lang_corpus != "eng" else lang_question + dataset_language = dataset.filter( + lambda x: x["lang"] == _LANG_CONVERSION.get(lang_not_english) + ) + question_key = "question_en" if lang_question == "eng" else "question" + corpus_key = "candidate" if lang_corpus == "eng" else "answer" + + queries_to_ids = { + eval_split: { + q: str(_id) + for _id, q in enumerate( + set(dataset_language[eval_split][question_key]) + ) + } + for eval_split in eval_splits + } + + self.queries[lang_pair] = { + eval_split: {v: k for k, v in queries_to_ids[eval_split].items()} + for eval_split in eval_splits + } + + corpus_to_ids = { + eval_split: { + document: str(_id) + for _id, document in enumerate( + set(dataset_language[eval_split][corpus_key]) + ) + } + for eval_split in eval_splits + } + + self.corpus[lang_pair] = { + eval_split: { + v: {"text": k} for k, v in corpus_to_ids[eval_split].items() + } + for eval_split in eval_splits + } + + self.relevant_docs[lang_pair] = {} + for eval_split in eval_splits: + self.relevant_docs[lang_pair][eval_split] = {} + for example in dataset_language[eval_split]: + query_id = queries_to_ids[eval_split].get(example[question_key]) + document_id = corpus_to_ids[eval_split].get(example[corpus_key]) + if query_id in self.relevant_docs[lang_pair][eval_split]: + self.relevant_docs[lang_pair][eval_split][query_id][ + document_id + ] = 1 + else: + self.relevant_docs[lang_pair][eval_split][query_id] = { + document_id: 1 + } + + self.data_loaded = True + + +def 
_load_dataset_csv(path: str, revision: str, eval_splits: List[str]): + data_files = { + eval_split: f"https://huggingface.co/datasets/{path}/resolve/{revision}/{eval_split}.csv" + for eval_split in eval_splits + } + dataset = datasets.load_dataset("csv", data_files=data_files) + dataset = dataset.filter(lambda x: x["answer"] is not None) + + return dataset
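+ + +# Minimal usage sketch (illustrative only, not part of the task definition): any +# SentenceTransformer-compatible model can be evaluated on the task above, e.g.: +# +#     from mteb import MTEB +#     from sentence_transformers import SentenceTransformer +# +#     model = SentenceTransformer("intfloat/multilingual-e5-small")  # example model +#     MTEB(tasks=["XPQARetrieval"]).run(model, output_folder="results")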
 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XQuADRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XQuADRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..05e7d0b417db10445de6048df310de0d1273710a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/XQuADRetrieval.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +from hashlib import sha256 + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks import MultilingualTask +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + +_LANGUAGES = { + "ar": ["arb-Arab"], + "de": ["deu-Latn"], + "el": ["ell-Grek"], + "en": ["eng-Latn"], + "es": ["spa-Latn"], + "hi": ["hin-Deva"], + "ro": ["ron-Latn"], + "ru": ["rus-Cyrl"], + "th": ["tha-Thai"], + "tr": ["tur-Latn"], + "vi": ["vie-Latn"], + "zh": ["zho-Hans"], +} + + +class XQuADRetrieval(MultilingualTask, AbsTaskRetrieval): + metadata = TaskMetadata( + name="XQuADRetrieval", + dataset={ + "path": "google/xquad", + "revision": "51adfef1c1287aab1d2d91b5bead9bcfb9c68583", + }, + description="XQuAD is a benchmark dataset for evaluating cross-lingual question answering performance. It is repurposed for retrieving the relevant context for each question.", + reference="https://huggingface.co/datasets/xquad", + type="Retrieval", + category="s2p", + eval_splits=["validation"], + eval_langs=_LANGUAGES, + main_score="ndcg_at_10", + date=("2019-05-21", "2019-11-21"), + form=["written"], + domains=["Web"], + task_subtypes=["Question answering"], + license="CC BY-SA 4.0", + socioeconomic_status="mixed", + annotations_creators="human-annotated", + dialect=[], + text_creation="created", + bibtex_citation="""@article{Artetxe:etal:2019, + author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama}, + title = {On the cross-lingual transferability of monolingual representations}, + journal = {CoRR}, + volume = {abs/1910.11856}, + year = {2019}, + archivePrefix = {arXiv}, + eprint = {1910.11856} +} +@inproceedings{ + dumitrescu2021liro, + title={LiRo: Benchmark and leaderboard for Romanian language tasks}, + author={Stefan Daniel Dumitrescu and Petru Rebeja and Beata Lorincz and Mihaela Gaman and Andrei Avram and Mihai Ilie and Andrei Pruteanu and Adriana Stan and Lorena Rosia and Cristina Iacobescu and Luciana Morogan and George Dima and Gabriel Marchidan and Traian Rebedea and Madalina Chitez and Dani Yogatama and Sebastian Ruder and Radu Tudor Ionescu and Razvan Pascanu and Viorica Patraucean}, + booktitle={Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1)}, + year={2021}, + url={https://openreview.net/forum?id=JH61CD7afTv} +}""", + n_samples={"test": 1190}, + avg_character_length={"test": 788.7}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + split = "validation" + queries = {lang: {split: {}} for lang in self.hf_subsets} + corpus = {lang: {split: {}} for lang in self.hf_subsets} + relevant_docs = {lang: {split: {}} for lang in self.hf_subsets} + + for lang in self.hf_subsets: + data = datasets.load_dataset( + name=f"xquad.{lang}", **self.metadata_dict["dataset"] + )[split] + data = data.filter(lambda x: len(x["answers"]["text"]) > 0)  # drop unanswerable questions + + question_ids = { + question: id for id, question in zip(data["id"], data["question"]) + } + context_ids = { + context: sha256(context.encode("utf-8")).hexdigest() + for context in set(data["context"]) + } + + for row in data: + question = row["question"] + context = row["context"] + query_id = question_ids[question] + queries[lang][split][query_id] = question + + doc_id = context_ids[context] + corpus[lang][split][doc_id] = {"text": context} + if query_id not in relevant_docs[lang][split]: + relevant_docs[lang][split][query_id] = {} + relevant_docs[lang][split][query_id][doc_id] = 1 + + self.corpus = datasets.DatasetDict(corpus) + self.queries = datasets.DatasetDict(queries) + self.relevant_docs = datasets.DatasetDict(relevant_docs) + + self.data_loaded = True
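+ +# Shape of the loaded attributes (sketch of the construction above; contexts are +# deduplicated across questions via their sha256 digest): +# +#     queries["en"]["validation"]       == {"<squad_id>": "<question>", ...} +#     corpus["en"]["validation"]        == {"<sha256_of_context>": {"text": "<context>"}, ...} +#     relevant_docs["en"]["validation"] == {"<squad_id>": {"<sha256_of_context>": 1}, ...}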
 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/multilingual/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/nob/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/nob/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/nob/norquad.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/nob/norquad.py new file mode 100644 index 0000000000000000000000000000000000000000..6b1f5addb3aa46b81e5c2bf20875a1782d9d15bd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/nob/norquad.py @@ -0,0 +1,95 @@ +import datasets + +from mteb.abstasks import AbsTaskRetrieval, TaskMetadata + + +class NorQuadRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NorQuadRetrieval", + dataset={ + "path": "mteb/norquad_retrieval", + "revision": "9dcfcdb2aa578dd178330d49bf564248935f7fbe", + }, + description="Human-created questions for Norwegian Wikipedia passages.", + reference="https://aclanthology.org/2023.nodalida-1.17/", + type="Retrieval", + category="p2p", + eval_splits=["test"], + eval_langs=["nob-Latn"], + main_score="ndcg_at_10", + date=("2022-01-01", "2023-12-31"), + form=["written"], + task_subtypes=["Question answering"], + domains=["Encyclopaedic", "Non-fiction"], + license="CC-BY-SA-4.0", + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@inproceedings{ivanova-etal-2023-norquad, + title = "{N}or{Q}u{AD}: {N}orwegian Question Answering Dataset", + author = "Ivanova, Sardana and + Andreassen, Fredrik and + Jentoft, Matias and + Wold, Sondre and + {\O}vrelid, Lilja", + editor = {Alum{\"a}e, Tanel and + Fishel, Mark}, + booktitle = "Proceedings of the 24th Nordic Conference on Computational Linguistics (NoDaLiDa)", + month = may, + year = "2023", + address = "T{\'o}rshavn, Faroe Islands", + publisher = "University of Tartu Library", + url = "https://aclanthology.org/2023.nodalida-1.17", + pages = "159--168", + abstract = "In this paper we present NorQuAD: the first Norwegian question answering dataset for machine reading comprehension. The dataset consists of 4,752 manually created question-answer pairs. We here detail the data collection procedure and present statistics of the dataset. We also benchmark several multilingual and Norwegian monolingual language models on the dataset and compare them against human performance. The dataset will be made freely available.", +}""", + n_samples={"test": 2602}, + avg_character_length={"test": 502.19}, + ) + + def dataset_transform(self) -> None: + """Transform the dataset into a retrieval dataset with the following attributes + + self.corpus = Dict[doc_id, Dict[str, str]]  # doc_id => dict with document data such as title and text + self.queries = Dict[query_id, str]  # query_id => query + self.relevant_docs = Dict[query_id, Dict[doc_id, score]] + """ + self.corpus = {} + self.relevant_docs = {} + self.queries = {} + text2id = {} + + for split in self.dataset: + ds: datasets.Dataset = self.dataset[split]  # type: ignore + ds = ds.shuffle(seed=42) + max_samples = min(1024, len(ds)) + ds = ds.select( + range(max_samples) + )  # limit the dataset size to make sure the task does not take too long to run + self.queries[split] = {} + self.relevant_docs[split] = {} + self.corpus[split] = {} + + question = ds["question"] + context = ds["context"] + answer = [a["text"][0] for a in ds["answers"]] + + n = 0 + for q, cont, ans in zip(question, context, answer): + self.queries[split][str(n)] = q + q_n = n + n += 1 + if cont not in text2id: + text2id[cont] = n + self.corpus[split][str(n)] = {"title": "", "text": cont} + n += 1 + if ans not in text2id: + text2id[ans] = n + self.corpus[split][str(n)] = {"title": "", "text": ans} + n += 1 + + self.relevant_docs[split][str(q_n)] = { + str(text2id[ans]): 1, + str(text2id[cont]): 1, + }  # only two correct matches
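+ +# Sketch of the transformed structure: each question ends up with exactly two +# relevant documents, its source passage and its gold answer, e.g.: +# +#     queries["test"]["0"]       -> "<question>" +#     corpus["test"]["1"]        -> {"title": "", "text": "<context>"} +#     corpus["test"]["2"]        -> {"title": "", "text": "<answer>"} +#     relevant_docs["test"]["0"] -> {"2": 1, "1": 1}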
 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/nob/snl_retrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/nob/snl_retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..596b5db326202b4398672c016db81fed5db1bc75 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/nob/snl_retrieval.py @@ -0,0 +1,73 @@ +import datasets + +from mteb.abstasks import AbsTaskRetrieval, TaskMetadata + + +class SNLRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="SNLRetrieval", + dataset={ + "path": "navjordj/SNL_summarization", + "revision": "3d3d27aa7af8941408cefc3991ada5d12a4273d1", + }, + description="Web-scraped articles and lead paragraphs (ingresses) from the Norwegian lexicon 'Det Store Norske Leksikon'.", + reference="https://huggingface.co/datasets/navjordj/SNL_summarization", + type="Retrieval", + category="p2p", + eval_splits=["test"], + eval_langs=["nob-Latn"], + main_score="ndcg_at_10", + date=("2020-01-01", "2024-12-31"),  # best guess + form=["written"], + domains=["Encyclopaedic", "Non-fiction"], + license=None, + socioeconomic_status="high", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation="""@mastersthesis{navjord2023beyond, + title={Beyond extractive: advancing abstractive automatic text summarization in Norwegian with transformers}, + author={Navjord, J{\o}rgen Johnsen and Korsvik, Jon-Mikkel Ryen}, + year={2023}, + school={Norwegian University of Life Sciences, {\AA}s} +}""", + n_samples={"test": 2048}, + avg_character_length={"test": 1101.30}, + task_subtypes=["Article retrieval"], + ) + + def dataset_transform(self) -> None: + """Transform the dataset into a retrieval dataset with the following attributes + + self.corpus = Dict[doc_id, Dict[str, str]]  # doc_id => dict with document data such as title and text + self.queries = Dict[query_id, str]  # query_id => query + self.relevant_docs = Dict[query_id, Dict[doc_id, score]] + """ + self.corpus = {} + self.relevant_docs = {} + self.queries = {} + text2id = {} + + for split in self.dataset: + ds: datasets.Dataset = self.dataset[split]  # type: ignore + ds = ds.shuffle(seed=42) + + self.queries[split] = {} + self.relevant_docs[split] = {} + self.corpus[split] = {} + + headline = ds["headline"] + article = ds["article"] + + n = 0 + for headl, art in zip(headline, article): + self.queries[split][str(n)] = headl + q_n = n + n += 1 + if art not in text2id: + text2id[art] = n + self.corpus[split][str(n)] = {"title": "", "text": art} + n += 1 + self.relevant_docs[split][str(q_n)] = { + str(text2id[art]): 1 + }  # only one correct match diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/ArguAnaPLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/ArguAnaPLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..e3429ecfed50384fdb6b41888734fdaa8a9e31ad --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/ArguAnaPLRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class ArguAnaPL(AbsTaskRetrieval): + metadata = TaskMetadata( + name="ArguAna-PL", + description="ArguAna-PL", + reference="https://huggingface.co/datasets/clarin-knext/arguana-pl", + dataset={ + "path": "clarin-knext/arguana-pl", + "revision": "63fc86750af76253e8c760fc9e534bbf24d260a2", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/DBPediaPLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/DBPediaPLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..3ca52081160b9867a67780e244feb161fd47fb92 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/DBPediaPLRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class DBPediaPL(AbsTaskRetrieval): + metadata = TaskMetadata( + name="DBPedia-PL", + description="DBpedia-Entity is a standard test collection for entity search over the DBpedia knowledge base", + reference="https://github.com/iai-group/DBpedia-Entity/", + dataset={ + "path": "clarin-knext/dbpedia-pl", + "revision": "76afe41d9af165cc40999fcaa92312b8b012064a", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/FiQAPLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/FiQAPLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..916f206974dafeaf2ebefcefecedccad1b9ede7b --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/FiQAPLRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class FiQAPLRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="FiQA-PL", + description="Financial Opinion Mining and Question Answering", + reference="https://sites.google.com/view/fiqa/", + dataset={ + "path": "clarin-knext/fiqa-pl", + "revision": "2e535829717f8bf9dc829b7f911cc5bbd4e6608e", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/HotpotQAPLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/HotpotQAPLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..98f9c1875b12c94a99c234b1bafda0e9cad31ed0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/HotpotQAPLRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class HotpotQAPL(AbsTaskRetrieval): + metadata = TaskMetadata( + name="HotpotQA-PL", + description="HotpotQA is a question answering dataset featuring natural, multi-hop questions, with strong supervision for supporting facts to enable more explainable question answering systems.", + reference="https://hotpotqa.github.io/", + dataset={ + "path": "clarin-knext/hotpotqa-pl", + "revision": "a0bd479ac97b4ccb5bd6ce320c415d0bb4beb907", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/MSMARCOPLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/MSMARCOPLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..13760771ad5c60d97eb10394d5ec1a5d44c85928 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/MSMARCOPLRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class MSMARCOPL(AbsTaskRetrieval): + metadata = TaskMetadata( + name="MSMARCO-PL", + description="MS MARCO is a collection of datasets focused on deep learning in search", + reference="https://microsoft.github.io/msmarco/", + dataset={ + "path": "clarin-knext/msmarco-pl", + "revision": "8634c07806d5cce3a6138e260e59b81760a0a640", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + 
avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/NFCorpusPLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/NFCorpusPLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..7ff8b7bcb8b259490186e6f6df880d2db400b38d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/NFCorpusPLRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class NFCorpusPL(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NFCorpus-PL", + description="NFCorpus: A Full-Text Learning to Rank Dataset for Medical Information Retrieval", + reference="https://www.cl.uni-heidelberg.de/statnlpgroup/nfcorpus/", + dataset={ + "path": "clarin-knext/nfcorpus-pl", + "revision": "9a6f9567fda928260afed2de480d79c98bf0bec0", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/NQPLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/NQPLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..e303de290b3801f209591e7599f79de971d358f7 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/NQPLRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class NQPL(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NQ-PL", + description="Natural Questions: A Benchmark for Question Answering Research", + reference="https://ai.google.com/research/NaturalQuestions/", + dataset={ + "path": "clarin-knext/nq-pl", + "revision": "f171245712cf85dd4700b06bef18001578d0ca8d", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/TRECCOVIDPLRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/TRECCOVIDPLRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..0ae2a134c690f399164a1c2c8bd324717cf0184f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/pol/TRECCOVIDPLRetrieval.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class TRECCOVIDPL(AbsTaskRetrieval): + metadata = TaskMetadata( + name="TRECCOVID-PL", + description="TRECCOVID is an ad-hoc search challenge based on the COVID-19 dataset containing scientific articles related to the COVID-19 pandemic.", + reference="https://ir.nist.gov/covidSubmit/index.html", + dataset={ + "path": "clarin-knext/trec-covid-pl", + "revision": "81bcb408f33366c2a20ac54adafad1ae7e877fdd", 
+ }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["pol-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/slk/SlovakSumRetrieval.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/slk/SlovakSumRetrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d2321bc5cae600b8dd98b746e46ca355576fb0c7 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/slk/SlovakSumRetrieval.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class SlovakSumRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="SlovakSumRetrieval", + description=""" + SlovakSum, a Slovak news summarization dataset consisting of over 200 thousand + news articles with titles and short abstracts obtained from multiple Slovak newspapers. + + Originally intended as a summarization task, but since no human annotations were provided + here reformulated to a retrieval task. + """, + reference="https://huggingface.co/datasets/NaiveNeuron/slovaksum", + dataset={ + "path": "NaiveNeuron/slovaksum", + "revision": "85d6b32f2762313714618171b9d1a65eb7408835", + }, + type="Retrieval", + category="s2s", + eval_splits=["test"], + eval_langs=["slk-Latn"], + main_score="ndcg_at_10", + date=("2015-04-26", "2022-01-11"), + form=["written"], + domains=["News", "Social", "Web"], + task_subtypes=["Article retrieval"], + license="openrail", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + text_creation="found", + bibtex_citation=""" + @inproceedings{OndrejowaSlovakSum24, + title = {SlovakSum: A Large Scale Slovak Summarization Dataset}, + booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation}, + author = {Ondrejová, Viktória and Šuppa, Marek}, + date = {2024}, + } + """, + n_samples={"test": 600}, + avg_character_length={"test": 238.44}, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + self.corpus, self.queries, self.relevant_docs = {}, {}, {} + dataset_path = self.metadata_dict["dataset"]["path"] + n_sample = self.metadata_dict["n_samples"]["test"] + + for split in kwargs.get("eval_splits", self.metadata_dict["eval_splits"]): + split_ds = datasets.load_dataset( + dataset_path, split=f"{split}[:{n_sample}]" + ) + # Transforming news summary into retrieval task + queries = {f"q{e+1}": x["sum"] for e, x in enumerate(split_ds)} + corpus = { + f"d{e+1}": {"title": x["title"], "text": x["text"]} + for e, x in enumerate(split_ds) + } + qrels = {f"q{i+1}": {f"d{i+1}": 1} for i in range(split_ds.shape[0])} + self.corpus[split], self.queries[split], self.relevant_docs[split] = ( + corpus, + queries, + qrels, + ) + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/slk/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/slk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/spa/SpanishPassageRetrievalS2P.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/spa/SpanishPassageRetrievalS2P.py new file mode 100644 index 0000000000000000000000000000000000000000..10753ac32adaa1b5f06c5ee0618b08efe4450ad3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/spa/SpanishPassageRetrievalS2P.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class SpanishPassageRetrievalS2P(AbsTaskRetrieval): + metadata = TaskMetadata( + name="SpanishPassageRetrievalS2P", + description="Test collection for passage retrieval from health-related Web resources in Spanish.", + reference="https://mklab.iti.gr/results/spanish-passage-retrieval-dataset/", + dataset={ + "path": "jinaai/spanish_passage_retrieval", + "revision": "9cddf2ce5209ade52c2115ccfa00eb22c6d3a837", + }, + type="Retrieval", + category="s2p", + eval_splits=["test"], + eval_langs=["spa-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + query_rows = datasets.load_dataset( + name="queries", + split="test", + trust_remote_code=True, + **self.metadata_dict["dataset"], + ) + corpus_rows = datasets.load_dataset( + name="corpus.documents", + split="test", + trust_remote_code=True, + **self.metadata_dict["dataset"], + ) + qrels_rows = datasets.load_dataset( + name="qrels.s2p", + split="test", + trust_remote_code=True, + **self.metadata_dict["dataset"], + ) + + self.queries = {"test": {row["_id"]: row["text"] for row in query_rows}} + self.corpus = {"test": {row["_id"]: row for row in corpus_rows}} + self.relevant_docs = { + "test": { + row["_id"]: {v: 1 for v in row["text"].split(" ")} for row in qrels_rows + } + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/spa/SpanishPassageRetrievalS2S.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/spa/SpanishPassageRetrievalS2S.py new file mode 100644 index 0000000000000000000000000000000000000000..3e0675750386d029801b8e81de589d1920654de8 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/spa/SpanishPassageRetrievalS2S.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval + + +class SpanishPassageRetrievalS2S(AbsTaskRetrieval): + metadata = TaskMetadata( + name="SpanishPassageRetrievalS2S", + description="Test collection for passage retrieval from health-related Web resources in Spanish.", + reference="https://mklab.iti.gr/results/spanish-passage-retrieval-dataset/", + dataset={ + "path": "jinaai/spanish_passage_retrieval", + "revision": "9cddf2ce5209ade52c2115ccfa00eb22c6d3a837", + "trust_remote_code": True, + }, + type="Retrieval", + category="s2s", + eval_splits=["test"], + eval_langs=["spa-Latn"], + main_score="ndcg_at_10", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, 
+ avg_character_length=None, + ) + + def load_data(self, **kwargs): + if self.data_loaded: + return + + query_rows = datasets.load_dataset( + name="queries", + split="test", + **self.metadata_dict["dataset"], + ) + corpus_rows = datasets.load_dataset( + name="corpus.sentences", + split="test", + **self.metadata_dict["dataset"], + ) + qrels_rows = datasets.load_dataset( + name="qrels.s2s", + split="test", + **self.metadata_dict["dataset"], + ) + + self.queries = {"test": {row["_id"]: row["text"] for row in query_rows}} + self.corpus = {"test": {row["_id"]: row for row in corpus_rows}} + self.relevant_docs = { + "test": { + row["_id"]: {v: 1 for v in row["text"].split(" ")} for row in qrels_rows + } + } + + self.data_loaded = True diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/spa/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/spa/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/swe/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/swe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/zho/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/Retrieval/zho/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/STS/deu/__init__.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/STS/deu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/embeddings-benchmark__mteb/mteb/tasks/STS/eng/SickrSTS.py b/testbed/embeddings-benchmark__mteb/mteb/tasks/STS/eng/SickrSTS.py new file mode 100644 index 0000000000000000000000000000000000000000..d10fc45ccff016a6f8f0b9c392bcfd0f3f2bcbf7 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/mteb/tasks/STS/eng/SickrSTS.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from mteb.abstasks.TaskMetadata import TaskMetadata + +from ....abstasks.AbsTaskSTS import AbsTaskSTS + + +class SickrSTS(AbsTaskSTS): + metadata = TaskMetadata( + name="SICK-R", + dataset={ + "path": "mteb/sickr-sts", + "revision": "20a6d6f312dd54037fe07a32d58e5e168867909d", + }, + description="Semantic Textual Similarity SICK-R dataset, as described in the referenced paper.", + reference="https://aclanthology.org/2020.lrec-1.207", + type="STS", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="cosine_spearman", + date=None, + form=None, + domains=None, + task_subtypes=None, + license=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation=None, + n_samples=None, + avg_character_length=None, + ) + + @property + def metadata_dict(self) -> dict[str, str]: + metadata_dict = super().metadata_dict + metadata_dict["min_score"] = 0 + metadata_dict["max_score"] = 5 + return metadata_dict diff --git a/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/Core17InstructionRetrieval.json b/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/Core17InstructionRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..b8643e955e5c7fe394d722a40236301b59321db6 --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/Core17InstructionRetrieval.json @@ -0,0 +1,76 @@ +{ + "dataset_revision": "e783a88ec6bc4bbdc1ba998edd85edeaf3d820f7", + "mteb_dataset_name": "Core17InstructionRetrieval", + "mteb_version": "1.6.11", + "test": { + "evaluation_time": 792.01, + "individual": { + "base": {}, + "changed": { + "map_at_1": 0.01478, + "map_at_10": 0.10619, + "map_at_100": 0.30902, + "map_at_1000": 0.37725, + "map_at_3": 0.04336, + "map_at_5": 0.06354, + "mrr_at_1": 0.6, + "mrr_at_10": 0.73714, + "mrr_at_100": 0.73714, + "mrr_at_1000": 0.73714, + "mrr_at_3": 0.71667, + "mrr_at_5": 0.71667, + "ndcg_at_1": 0.55, + "ndcg_at_10": 0.50363, + "ndcg_at_100": 0.516, + "ndcg_at_1000": 0.71342, + "ndcg_at_3": 0.57067, + "ndcg_at_5": 0.52991, + "precision_at_1": 0.6, + "precision_at_10": 0.585, + "precision_at_100": 0.272, + "precision_at_1000": 0.0545, + "precision_at_3": 0.66667, + "precision_at_5": 0.62, + "recall_at_1": 0.01478, + "recall_at_10": 0.13198, + "recall_at_100": 0.51375, + "recall_at_1000": 1.0, + "recall_at_3": 0.04686, + "recall_at_5": 0.06977 + }, + "original": { + "map_at_1": 0.00476, + "map_at_10": 0.04736, + "map_at_100": 0.16799, + "map_at_1000": 0.20803, + "map_at_3": 0.01114, + "map_at_5": 0.02339, + "mrr_at_1": 0.2, + "mrr_at_10": 0.36167, + "mrr_at_100": 0.3751, + "mrr_at_1000": 0.3751, + "mrr_at_3": 0.28333, + "mrr_at_5": 0.34083, + "ndcg_at_1": 0.15, + "ndcg_at_10": 0.21477, + "ndcg_at_100": 0.36746, + "ndcg_at_1000": 0.54128, + "ndcg_at_3": 0.14106, + "ndcg_at_5": 0.1883, + "precision_at_1": 0.2, + "precision_at_10": 0.28, + "precision_at_100": 0.157, + "precision_at_1000": 0.0327, + "precision_at_3": 0.2, + "precision_at_5": 0.27, + "recall_at_1": 0.00476, + "recall_at_10": 0.09754, + "recall_at_100": 0.48965, + "recall_at_1000": 1.0, + "recall_at_3": 0.01513, + "recall_at_5": 0.04345 + } + }, + "p-MRR": 0.025878601734131052 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/News21InstructionRetrieval.json b/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/News21InstructionRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..6922e03ba802150c5471712244be4d600a3259ef --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/News21InstructionRetrieval.json @@ -0,0 +1,76 @@ +{ + "dataset_revision": "e783a88ec6bc4bbdc1ba998edd85edeaf3d820f7", + "mteb_dataset_name": "News21InstructionRetrieval", + "mteb_version": "1.6.11", + "test": { + "evaluation_time": 1158.18, + "individual": { + "base": {}, + "changed": { + "map_at_1": 0.02908, + "map_at_10": 0.19383, + "map_at_100": 0.40836, + "map_at_1000": 0.44721, + "map_at_3": 0.08415, + "map_at_5": 0.11831, + "mrr_at_1": 0.625, + "mrr_at_10": 0.78177, + "mrr_at_100": 0.78177, + "mrr_at_1000": 0.78177, + "mrr_at_3": 0.77083, + "mrr_at_5": 0.77865, + "ndcg_at_1": 0.35547, + "ndcg_at_10": 0.45101, + "ndcg_at_100": 0.59189, + "ndcg_at_1000": 0.67465, + "ndcg_at_3": 0.43338, + "ndcg_at_5": 0.42771, + "precision_at_1": 0.625, + "precision_at_10": 0.60625, + "precision_at_100": 0.21125, + "precision_at_1000": 0.03391, + "precision_at_3": 0.70833, + "precision_at_5": 0.6375, + "recall_at_1": 0.02908, + "recall_at_10": 0.24147, + "recall_at_100": 0.69515, + "recall_at_1000": 1.0, + "recall_at_3": 0.09445, + "recall_at_5": 0.13572 + }, + "original": { + "map_at_1": 0.03401, + "map_at_10": 0.15083, + "map_at_100": 0.2794, + "map_at_1000": 0.3016, + "map_at_3": 0.07441, + "map_at_5": 
0.09703, + "mrr_at_1": 0.40625, + "mrr_at_10": 0.54852, + "mrr_at_100": 0.55324, + "mrr_at_1000": 0.55324, + "mrr_at_3": 0.50521, + "mrr_at_5": 0.52552, + "ndcg_at_1": 0.25781, + "ndcg_at_10": 0.33801, + "ndcg_at_100": 0.4912, + "ndcg_at_1000": 0.55834, + "ndcg_at_3": 0.23712, + "ndcg_at_5": 0.2517, + "precision_at_1": 0.40625, + "precision_at_10": 0.34063, + "precision_at_100": 0.12437, + "precision_at_1000": 0.01916, + "precision_at_3": 0.32292, + "precision_at_5": 0.325, + "recall_at_1": 0.03401, + "recall_at_10": 0.25384, + "recall_at_100": 0.7248, + "recall_at_1000": 1.0, + "recall_at_3": 0.08402, + "recall_at_5": 0.12776 + } + }, + "p-MRR": 0.021292335979316977 + } + } \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/Robust04InstructionRetrieval.json b/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/Robust04InstructionRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..a5a8e45f10ca56d80a690d218b5d0cadea41b224 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/GritLM__GritLM-7B/Robust04InstructionRetrieval.json @@ -0,0 +1,76 @@ +{ + "dataset_revision": "e783a88ec6bc4bbdc1ba998edd85edeaf3d820f7", + "mteb_dataset_name": "Robust04InstructionRetrieval", + "mteb_version": "1.6.11", + "test": { + "evaluation_time": 1942.96, + "individual": { + "base": {}, + "changed": { + "map_at_1": 0.04426, + "map_at_10": 0.19172, + "map_at_100": 0.35384, + "map_at_1000": 0.39464, + "map_at_3": 0.10181, + "map_at_5": 0.1401, + "mrr_at_1": 0.69231, + "mrr_at_10": 0.79188, + "mrr_at_100": 0.79501, + "mrr_at_1000": 0.79501, + "mrr_at_3": 0.78205, + "mrr_at_5": 0.78974, + "ndcg_at_1": 0.57692, + "ndcg_at_10": 0.52491, + "ndcg_at_100": 0.57756, + "ndcg_at_1000": 0.71467, + "ndcg_at_3": 0.57057, + "ndcg_at_5": 0.55906, + "precision_at_1": 0.69231, + "precision_at_10": 0.52308, + "precision_at_100": 0.18981, + "precision_at_1000": 0.03369, + "precision_at_3": 0.65385, + "precision_at_5": 0.62692, + "recall_at_1": 0.04426, + "recall_at_10": 0.23332, + "recall_at_100": 0.63956, + "recall_at_1000": 1.0, + "recall_at_3": 0.11023, + "recall_at_5": 0.15957 + }, + "original": { + "map_at_1": 0.05644, + "map_at_10": 0.17143, + "map_at_100": 0.266, + "map_at_1000": 0.28981, + "map_at_3": 0.11204, + "map_at_5": 0.14176, + "mrr_at_1": 0.5, + "mrr_at_10": 0.59976, + "mrr_at_100": 0.61034, + "mrr_at_1000": 0.61034, + "mrr_at_3": 0.5609, + "mrr_at_5": 0.59167, + "ndcg_at_1": 0.44231, + "ndcg_at_10": 0.35452, + "ndcg_at_100": 0.47091, + "ndcg_at_1000": 0.59536, + "ndcg_at_3": 0.38415, + "ndcg_at_5": 0.37523, + "precision_at_1": 0.5, + "precision_at_10": 0.27692, + "precision_at_100": 0.10423, + "precision_at_1000": 0.01981, + "precision_at_3": 0.39744, + "precision_at_5": 0.36538, + "recall_at_1": 0.05644, + "recall_at_10": 0.23527, + "recall_at_100": 0.61112, + "recall_at_1000": 1.0, + "recall_at_3": 0.12457, + "recall_at_5": 0.17581 + } + }, + "p-MRR": -0.013554693561576736 + } + } \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json b/testbed/embeddings-benchmark__mteb/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json new file mode 100644 index 0000000000000000000000000000000000000000..8162cfdef7c20f89dddd8d42f1e5059833b2eb47 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json @@ -0,0 +1,43 @@ +{ + "dataset_revision": 
"ec0fa4fe99da2ff19ca1214b7966684033a58814", + "mteb_dataset_name": "NFCorpus", + "mteb_version": "1.6.34", + "test": { + "evaluation_time": 0.94, + "map_at_1": 0.05123, + "map_at_10": 0.09931, + "map_at_100": 0.09931, + "map_at_1000": 0.09931, + "map_at_20": 0.09931, + "map_at_3": 0.08578, + "map_at_5": 0.09931, + "mrr_at_1": 0.44272, + "mrr_at_10": 0.52394, + "mrr_at_100": 0.52394, + "mrr_at_1000": 0.52394, + "mrr_at_20": 0.52394, + "mrr_at_3": 0.51187, + "mrr_at_5": 0.52394, + "ndcg_at_1": 0.41641, + "ndcg_at_10": 0.25973, + "ndcg_at_100": 0.17476, + "ndcg_at_1000": 0.17186, + "ndcg_at_20": 0.21087, + "ndcg_at_3": 0.3807, + "ndcg_at_5": 0.35035, + "precision_at_1": 0.43653, + "precision_at_10": 0.1517, + "precision_at_100": 0.01517, + "precision_at_1000": 0.00152, + "precision_at_20": 0.07585, + "precision_at_3": 0.36326, + "precision_at_5": 0.30341, + "recall_at_1": 0.05123, + "recall_at_10": 0.11965, + "recall_at_100": 0.11965, + "recall_at_1000": 0.11965, + "recall_at_20": 0.11965, + "recall_at_3": 0.09663, + "recall_at_5": 0.11965 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloProfClusteringP2P.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloProfClusteringP2P.json new file mode 100644 index 0000000000000000000000000000000000000000..775c90043252057964e383ddf2d145c135dddda4 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloProfClusteringP2P.json @@ -0,0 +1,11 @@ +{ + "dataset_revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "mteb_dataset_name": "AlloProfClusteringP2P", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 55.43, + "main_score": 0.6119454858792518, + "v_measure": 0.6119454858792518, + "v_measure_std": 0.025929284476722155 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloProfClusteringS2S.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloProfClusteringS2S.json new file mode 100644 index 0000000000000000000000000000000000000000..ec3a6f0bce8377996f548e3be960260f6eed59c3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloProfClusteringS2S.json @@ -0,0 +1,11 @@ +{ + "dataset_revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "mteb_dataset_name": "AlloProfClusteringS2S", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 8.13, + "main_score": 0.39560544657085617, + "v_measure": 0.39560544657085617, + "v_measure_std": 0.01742203672499015 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloprofReranking.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloprofReranking.json new file mode 100644 index 0000000000000000000000000000000000000000..29f605afa8214cb3e14905d67d89e2a0d426167b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloprofReranking.json @@ -0,0 +1,10 @@ +{ + "dataset_revision": "e40c8a63ce02da43200eccb5b0846fcaa888f562", + "mteb_dataset_name": "AlloprofReranking", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 4251.71, + "map": 0.5050037818841264, + "mrr": 0.5186609027606437 + } +} \ No newline at end of file diff --git 
a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloprofRetrieval.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloprofRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..46eccd22ca9eeb69309598cfe578acfa0535dfc7 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AlloprofRetrieval.json @@ -0,0 +1,43 @@ +{ + "dataset_revision": "fcf295ea64c750f41fadbaa37b9b861558e1bfbd", + "mteb_dataset_name": "AlloprofRetrieval", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 84.74, + "map_at_1": 0.12133, + "map_at_10": 0.18133, + "map_at_100": 0.19369, + "map_at_1000": 0.19481, + "map_at_20": 0.18802, + "map_at_3": 0.15861, + "map_at_5": 0.16908, + "mrr_at_1": 0.12133, + "mrr_at_10": 0.18133, + "mrr_at_100": 0.19369, + "mrr_at_1000": 0.19481, + "mrr_at_20": 0.18802, + "mrr_at_3": 0.15861, + "mrr_at_5": 0.16908, + "ndcg_at_1": 0.12133, + "ndcg_at_10": 0.21938, + "ndcg_at_100": 0.28554, + "ndcg_at_1000": 0.31928, + "ndcg_at_20": 0.24389, + "ndcg_at_3": 0.17097, + "ndcg_at_5": 0.18974, + "precision_at_1": 0.12133, + "precision_at_10": 0.03437, + "precision_at_100": 0.00667, + "precision_at_1000": 0.00094, + "precision_at_20": 0.02204, + "precision_at_3": 0.06894, + "precision_at_5": 0.05043, + "recall_at_1": 0.12133, + "recall_at_10": 0.3437, + "recall_at_100": 0.66667, + "recall_at_1000": 0.94085, + "recall_at_20": 0.44085, + "recall_at_3": 0.20682, + "recall_at_5": 0.25216 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AmazonReviewsClassification.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AmazonReviewsClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..db8d9cdc4a3d89da2af3293b2c39e96dd645af67 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/AmazonReviewsClassification.json @@ -0,0 +1,25 @@ +{ + "dataset_revision": "1399c76144fd37290681b995c656ef9b2e06e26d", + "mteb_dataset_name": "AmazonReviewsClassification", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 53.36, + "fr": { + "accuracy": 0.35132, + "accuracy_stderr": 0.02341165521700676, + "f1": 0.34868691972809385, + "f1_stderr": 0.022076068674336893, + "main_score": 0.35132 + } + }, + "validation": { + "evaluation_time": 52.09, + "fr": { + "accuracy": 0.34986, + "accuracy_stderr": 0.02283471917935494, + "f1": 0.3472428072930915, + "f1_stderr": 0.02194241622754175, + "main_score": 0.34986 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/HALClusteringS2S.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/HALClusteringS2S.json new file mode 100644 index 0000000000000000000000000000000000000000..64b8a13d40e0813c0868b2099d42f483d1b82668 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/HALClusteringS2S.json @@ -0,0 +1,11 @@ +{ + "dataset_revision": "e06ebbbb123f8144bef1a5d18796f3dec9ae2915", + "mteb_dataset_name": "HALClusteringS2S", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 481.37, + "main_score": 0.19045993332986033, + "v_measure": 0.19045993332986033, + "v_measure_std": 0.02753473137555827 + } +} \ No newline at end of file diff --git 
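Editorial note: the clustering results here (AlloProfClustering*, HALClusteringS2S, and the MLSUM files that follow) report `v_measure` with a `v_measure_std`, consistent with the score being averaged over repeated clusterings (an assumption about the exact protocol). V-measure itself is the harmonic mean of homogeneity and completeness, and scikit-learn implements it directly:

```python
# Toy V-measure computation matching the metric reported in the files above.
from sklearn.metrics import v_measure_score

labels_true = [0, 0, 1, 1, 2, 2]
labels_pred = [0, 0, 1, 2, 2, 2]  # one point assigned to the wrong cluster
print(v_measure_score(labels_true, labels_pred))
```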
a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MLSUMClusteringP2P.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MLSUMClusteringP2P.json new file mode 100644 index 0000000000000000000000000000000000000000..b1b35ac5151af68360cf01ab90fcb4f3ab842b6e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MLSUMClusteringP2P.json @@ -0,0 +1,11 @@ +{ + "dataset_revision": "b5d54f8f3b61ae17845046286940f03c6bc79bc7", + "mteb_dataset_name": "MLSUMClusteringP2P", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 333.46, + "main_score": 0.34809530391310306, + "v_measure": 0.34809530391310306, + "v_measure_std": 0.013819568228235381 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MLSUMClusteringS2S.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MLSUMClusteringS2S.json new file mode 100644 index 0000000000000000000000000000000000000000..a8e3e24711d8c2a16181fe7a3db6d47d2b362129 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MLSUMClusteringS2S.json @@ -0,0 +1,11 @@ +{ + "dataset_revision": "b5d54f8f3b61ae17845046286940f03c6bc79bc7", + "mteb_dataset_name": "MLSUMClusteringS2S", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 69.71, + "main_score": 0.2452171783008227, + "v_measure": 0.2452171783008227, + "v_measure_std": 0.018606121595882312 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MTOPDomainClassification.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MTOPDomainClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..895c50a64f8533144350139112e3bd0abd4d9806 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MTOPDomainClassification.json @@ -0,0 +1,25 @@ +{ + "dataset_revision": "d80d48c1eb48d3562165c59d59d0034df9fff0bf", + "mteb_dataset_name": "MTOPDomainClassification", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 13.03, + "fr": { + "accuracy": 0.7761039774506733, + "accuracy_stderr": 0.012244255469538137, + "f1": 0.7755462283793302, + "f1_stderr": 0.012258010623387867, + "main_score": 0.7761039774506733 + } + }, + "validation": { + "evaluation_time": 8.26, + "fr": { + "accuracy": 0.7763474952441344, + "accuracy_stderr": 0.015480859884964141, + "f1": 0.7788868322317383, + "f1_stderr": 0.01617306283627062, + "main_score": 0.7763474952441344 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MTOPIntentClassification.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MTOPIntentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..1d792c285ab007beb0b8f1d9d37cb335bcb08b2e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MTOPIntentClassification.json @@ -0,0 +1,25 @@ +{ + "dataset_revision": "ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba", + "mteb_dataset_name": "MTOPIntentClassification", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 46.73, + "fr": { + "accuracy": 0.4980269339179456, + "accuracy_stderr": 0.019031657916761768, + "f1": 0.3506576591368301, + "f1_stderr": 
0.008842318703710859, + "main_score": 0.4980269339179456 + } + }, + "validation": { + "evaluation_time": 40.66, + "fr": { + "accuracy": 0.5039315155358276, + "accuracy_stderr": 0.01596422537053101, + "f1": 0.3268191897325229, + "f1_stderr": 0.012026800193710633, + "main_score": 0.5039315155358276 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClassification.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..0131309040f4473010adef0ed29206e19aa41026 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "8ccc72e69e65f40c70e117d8b3c08306bb788b60", + "mteb_dataset_name": "MasakhaNEWSClassification", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 18.35, + "fra": { + "accuracy": 0.6514218009478674, + "accuracy_stderr": 0.011435951196221483, + "f1": 0.6109460672951621, + "f1_stderr": 0.011576723163662353, + "main_score": 0.6514218009478674 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClusteringP2P.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClusteringP2P.json new file mode 100644 index 0000000000000000000000000000000000000000..b015ecf05496ce0d598e1a5b2ba5d86bf641650d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClusteringP2P.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "8ccc72e69e65f40c70e117d8b3c08306bb788b60", + "mteb_dataset_name": "MasakhaNEWSClusteringP2P", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 9.4, + "fra": { + "main_score": 0.3176718999463773, + "v_measure": 0.3176718999463773, + "v_measure_std": 0.3614018728926879 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClusteringS2S.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClusteringS2S.json new file mode 100644 index 0000000000000000000000000000000000000000..c691a6ceb4e6db10059834a3ce88919b0b8b7126 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MasakhaNEWSClusteringS2S.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "8ccc72e69e65f40c70e117d8b3c08306bb788b60", + "mteb_dataset_name": "MasakhaNEWSClusteringS2S", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 2.15, + "fra": { + "main_score": 0.32208404545907676, + "v_measure": 0.32208404545907676, + "v_measure_std": 0.3642937198369967 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MassiveIntentClassification.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MassiveIntentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..03ba4de71d0fad62f25bcd7a47bc4c2c6bf060a6 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MassiveIntentClassification.json @@ -0,0 +1,25 @@ +{ + "dataset_revision": "31efe3c427b0bae9c22cbb560b8f15491cc6bed7", + "mteb_dataset_name": "MassiveIntentClassification", + "mteb_version": 
"1.6.16", + "test": { + "evaluation_time": 33.79, + "fr": { + "accuracy": 0.5440147948890384, + "accuracy_stderr": 0.014428465092304197, + "f1": 0.5233382318104363, + "f1_stderr": 0.015300657440653833, + "main_score": 0.5440147948890384 + } + }, + "validation": { + "evaluation_time": 31.09, + "fr": { + "accuracy": 0.5463846532218397, + "accuracy_stderr": 0.01693795673965642, + "f1": 0.5179773557688407, + "f1_stderr": 0.018194100485743638, + "main_score": 0.5463846532218397 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MassiveScenarioClassification.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MassiveScenarioClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..0c7e97d6b6b4246b64538ba8dbe3e4cf41226a9a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MassiveScenarioClassification.json @@ -0,0 +1,25 @@ +{ + "dataset_revision": "7d571f92784cd94a019292a1f45445077d0ef634", + "mteb_dataset_name": "MassiveScenarioClassification", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 13.2, + "fr": { + "accuracy": 0.6325823806321453, + "accuracy_stderr": 0.018329682460092967, + "f1": 0.637887777522049, + "f1_stderr": 0.016205944582065524, + "main_score": 0.6325823806321453 + } + }, + "validation": { + "evaluation_time": 10.81, + "fr": { + "accuracy": 0.6296114117068372, + "accuracy_stderr": 0.016321357622269633, + "f1": 0.6339693096054267, + "f1_stderr": 0.01340308441006964, + "main_score": 0.6296114117068372 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MintakaRetrieval.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MintakaRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..6ebc7325f81ae8792016d9ef659f4fead7ce3c5d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/MintakaRetrieval.json @@ -0,0 +1,45 @@ +{ + "dataset_revision": "efa78cc2f74bbcd21eff2261f9e13aebe40b814e", + "mteb_dataset_name": "MintakaRetrieval", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 15.47, + "fr": { + "map_at_1": 0.08436, + "map_at_10": 0.115, + "map_at_100": 0.11932, + "map_at_1000": 0.12058, + "map_at_20": 0.11728, + "map_at_3": 0.10347, + "map_at_5": 0.11, + "mrr_at_1": 0.08436, + "mrr_at_10": 0.115, + "mrr_at_100": 0.11932, + "mrr_at_1000": 0.12058, + "mrr_at_20": 0.11728, + "mrr_at_3": 0.10347, + "mrr_at_5": 0.11, + "ndcg_at_1": 0.08436, + "ndcg_at_10": 0.13357, + "ndcg_at_100": 0.15867, + "ndcg_at_1000": 0.21048, + "ndcg_at_20": 0.14186, + "ndcg_at_3": 0.10971, + "ndcg_at_5": 0.1215, + "precision_at_1": 0.08436, + "precision_at_10": 0.01937, + "precision_at_100": 0.00321, + "precision_at_1000": 0.00077, + "precision_at_20": 0.01132, + "precision_at_3": 0.04259, + "precision_at_5": 0.03129, + "recall_at_1": 0.08436, + "recall_at_10": 0.19369, + "recall_at_100": 0.32146, + "recall_at_1000": 0.77232, + "recall_at_20": 0.22645, + "recall_at_3": 0.12776, + "recall_at_5": 0.15643 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/OpusparcusPC.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/OpusparcusPC.json new file mode 100644 index 
0000000000000000000000000000000000000000..09ba2a6c1fba57a5240194ee314295e34b1c9b44 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/OpusparcusPC.json @@ -0,0 +1,97 @@ +{ + "dataset_revision": "9e9b1f8ef51616073f47f306f7f47dd91663f86a", + "mteb_dataset_name": "OpusparcusPC", + "mteb_version": "1.6.16", + "test.full": { + "evaluation_time": 6.79, + "fr": { + "cos_sim": { + "accuracy": 0.8092643051771117, + "accuracy_threshold": 0.3072311282157898, + "ap": 0.9205083560505066, + "f1": 0.8679069767441862, + "f1_threshold": 0.27820637822151184, + "precision": 0.8162729658792651, + "recall": 0.9265143992055611 + }, + "dot": { + "accuracy": 0.8072207084468664, + "accuracy_threshold": 3.541840076446533, + "ap": 0.914175148685274, + "f1": 0.8641750227894258, + "f1_threshold": 2.6373980045318604, + "precision": 0.7986520640269588, + "recall": 0.9414101290963257 + }, + "euclidean": { + "accuracy": 0.8065395095367848, + "accuracy_threshold": 3.734102725982666, + "ap": 0.9204201982517962, + "f1": 0.8640732265446224, + "f1_threshold": 3.9497628211975098, + "precision": 0.801358234295416, + "recall": 0.9374379344587885 + }, + "manhattan": { + "accuracy": 0.8113079019073569, + "accuracy_threshold": 82.19488525390625, + "ap": 0.9204054382094206, + "f1": 0.867146282973621, + "f1_threshold": 82.19488525390625, + "precision": 0.8385899814471243, + "recall": 0.8977159880834161 + }, + "max": { + "accuracy": 0.8113079019073569, + "ap": 0.9205083560505066, + "f1": 0.8679069767441862 + } + } + }, + "validation.full": { + "evaluation_time": 6.45, + "fr": { + "cos_sim": { + "accuracy": 0.8076923076923077, + "accuracy_threshold": 0.32468485832214355, + "ap": 0.9328066173124074, + "f1": 0.8745387453874539, + "f1_threshold": 0.25643646717071533, + "precision": 0.8095644748078565, + "recall": 0.950852557673019 + }, + "dot": { + "accuracy": 0.8076923076923077, + "accuracy_threshold": 3.49470853805542, + "ap": 0.9270492994203674, + "f1": 0.8727931190583974, + "f1_threshold": 2.2983222007751465, + "precision": 0.7953795379537953, + "recall": 0.966900702106319 + }, + "euclidean": { + "accuracy": 0.8076923076923077, + "accuracy_threshold": 3.7523207664489746, + "ap": 0.9327659691005011, + "f1": 0.8705234159779613, + "f1_threshold": 3.9855661392211914, + "precision": 0.8027095681625741, + "recall": 0.950852557673019 + }, + "manhattan": { + "accuracy": 0.8076923076923077, + "accuracy_threshold": 82.04548645019531, + "ap": 0.9327278336262662, + "f1": 0.8696897374701671, + "f1_threshold": 83.09516143798828, + "precision": 0.8296903460837887, + "recall": 0.9137412236710131 + }, + "max": { + "accuracy": 0.8076923076923077, + "ap": 0.9328066173124074, + "f1": 0.8745387453874539 + } + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/PawsX.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/PawsX.json new file mode 100644 index 0000000000000000000000000000000000000000..b89bbf1a23afbd921af24a4f5007da30e800159e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/PawsX.json @@ -0,0 +1,97 @@ +{ + "dataset_revision": "8a04d940a42cd40658986fdd8e3da561533a3646", + "mteb_dataset_name": "PawsX", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 27.09, + "fr": { + "cos_sim": { + "accuracy": 0.6, + "accuracy_threshold": 0.9702560305595398, + "ap": 0.5743705197496742, + "f1": 0.6249134948096886, + "f1_threshold": 
0.3075079619884491, + "precision": 0.4544539506794162, + "recall": 1.0 + }, + "dot": { + "accuracy": 0.5625, + "accuracy_threshold": 8.532447814941406, + "ap": 0.49022411657318105, + "f1": 0.6260504201680672, + "f1_threshold": 4.510589122772217, + "precision": 0.4577572964669739, + "recall": 0.9900332225913622 + }, + "euclidean": { + "accuracy": 0.599, + "accuracy_threshold": 0.6622645854949951, + "ap": 0.5733055147132647, + "f1": 0.6251298026998962, + "f1_threshold": 3.0350723266601562, + "precision": 0.45468277945619334, + "recall": 1.0 + }, + "manhattan": { + "accuracy": 0.601, + "accuracy_threshold": 13.777313232421875, + "ap": 0.5723594105568219, + "f1": 0.6251298026998962, + "f1_threshold": 64.62416076660156, + "precision": 0.45468277945619334, + "recall": 1.0 + }, + "max": { + "accuracy": 0.601, + "ap": 0.5743705197496742, + "f1": 0.6260504201680672 + } + } + }, + "validation": { + "evaluation_time": 26.83, + "fr": { + "cos_sim": { + "accuracy": 0.6145, + "accuracy_threshold": 0.9890202283859253, + "ap": 0.5688996494049816, + "f1": 0.6043878273177636, + "f1_threshold": 0.6836806535720825, + "precision": 0.43438453713123093, + "recall": 0.9930232558139535 + }, + "dot": { + "accuracy": 0.5805, + "accuracy_threshold": 8.892292976379395, + "ap": 0.4631762651611144, + "f1": 0.6028741675429372, + "f1_threshold": 2.2409679889678955, + "precision": 0.4315102860010035, + "recall": 1.0 + }, + "euclidean": { + "accuracy": 0.6135, + "accuracy_threshold": 0.5454137921333313, + "ap": 0.5705965835041586, + "f1": 0.6045834849036014, + "f1_threshold": 1.8334648609161377, + "precision": 0.43991529910005295, + "recall": 0.9662790697674418 + }, + "manhattan": { + "accuracy": 0.616, + "accuracy_threshold": 11.813432693481445, + "ap": 0.5717060784724066, + "f1": 0.6031634446397188, + "f1_threshold": 58.71994400024414, + "precision": 0.43224181360201513, + "recall": 0.9976744186046511 + }, + "max": { + "accuracy": 0.616, + "ap": 0.5717060784724066, + "f1": 0.6045834849036014 + } + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SICKFr.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SICKFr.json new file mode 100644 index 0000000000000000000000000000000000000000..a5022d0c439b1fa25fbe77e0a47c11bf0ebe530d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SICKFr.json @@ -0,0 +1,35 @@ +{ + "dataset_revision": "e077ab4cf4774a1e36d86d593b150422fafd8e8a", + "mteb_dataset_name": "SICKFr", + "mteb_version": "1.6.16", + "test": { + "cos_sim": { + "pearson": 0.8090896348223799, + "spearman": 0.7417768177912207 + }, + "euclidean": { + "pearson": 0.7788580675913424, + "spearman": 0.7372487878488481 + }, + "evaluation_time": 30.67, + "manhattan": { + "pearson": 0.7786498247900432, + "spearman": 0.736574700292223 + } + }, + "validation": { + "cos_sim": { + "pearson": 0.8154254135260824, + "spearman": 0.762461503640995 + }, + "euclidean": { + "pearson": 0.7822363109158186, + "spearman": 0.7556375468501785 + }, + "evaluation_time": 3.96, + "manhattan": { + "pearson": 0.7814996623037098, + "spearman": 0.7547329747866959 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/STS22.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/STS22.json new file mode 100644 index 0000000000000000000000000000000000000000..22b2d2e632db37f2c13f529957ca8e8b39898a07 
--- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/STS22.json @@ -0,0 +1,22 @@ +{ + "dataset_revision": "eea2b4fe26a775864c896887d910b76a8098ad3f", + "mteb_dataset_name": "STS22", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 4.79, + "fr": { + "cos_sim": { + "pearson": 0.7383938091271671, + "spearman": 0.7753949779251178 + }, + "euclidean": { + "pearson": 0.658424401954861, + "spearman": 0.7571532568677795 + }, + "manhattan": { + "pearson": 0.6817630125205015, + "spearman": 0.764269410953596 + } + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/STSBenchmarkMultilingualSTS.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/STSBenchmarkMultilingualSTS.json new file mode 100644 index 0000000000000000000000000000000000000000..6c1e868806530c48a56047ff952b9a4dd9ca3f0e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/STSBenchmarkMultilingualSTS.json @@ -0,0 +1,39 @@ +{ + "dataset_revision": "93d57ef91790589e3ce9c365164337a8a78b7632", + "dev": { + "evaluation_time": 13.91, + "fr": { + "cos_sim": { + "pearson": 0.8673439664942635, + "spearman": 0.8653680500567739 + }, + "euclidean": { + "pearson": 0.8465451102403826, + "spearman": 0.8519005707572319 + }, + "manhattan": { + "pearson": 0.8447251084557995, + "spearman": 0.8501901211448071 + } + } + }, + "mteb_dataset_name": "STSBenchmarkMultilingualSTS", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 10.31, + "fr": { + "cos_sim": { + "pearson": 0.8235852416280138, + "spearman": 0.8164058478510985 + }, + "euclidean": { + "pearson": 0.8017331005515382, + "spearman": 0.8021524090402087 + }, + "manhattan": { + "pearson": 0.8018873068754694, + "spearman": 0.8019178603890778 + } + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SummEvalFr.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SummEvalFr.json new file mode 100644 index 0000000000000000000000000000000000000000..3f4d2c274d2d1ab5e6ccea4f183d552ba90c99cf --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SummEvalFr.json @@ -0,0 +1,16 @@ +{ + "dataset_revision": "b385812de6a9577b6f4d0f88c6a6e35395a94054", + "mteb_dataset_name": "SummEvalFr", + "mteb_version": "1.6.16", + "test": { + "cos_sim": { + "pearson": 0.2870648764845772, + "spearman": 0.2877116932900191 + }, + "dot": { + "pearson": 0.237687639823705, + "spearman": 0.25364578284071304 + }, + "evaluation_time": 42.51 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SyntecReranking.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SyntecReranking.json new file mode 100644 index 0000000000000000000000000000000000000000..97c5bcf8372a0b964fd712c4d78012473fb4ef40 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SyntecReranking.json @@ -0,0 +1,10 @@ +{ + "dataset_revision": "b205c5084a0934ce8af14338bf03feb19499c84d", + "mteb_dataset_name": "SyntecReranking", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 87.1, + "map": 0.7975, + "mrr": 0.7975 + } +} \ No newline at end of file diff --git 
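Editorial note: the STS files above (SICKFr, STS22, STSBenchmarkMultilingualSTS) and SummEvalFr report Pearson/Spearman correlations between gold similarity scores and embedding similarities under several distance functions; a `cosine_spearman` main score comes from exactly this computation. A minimal sketch with random vectors standing in for real sentence embeddings:

```python
# How the cos_sim pearson/spearman numbers are obtained (random stand-in data).
import numpy as np
from scipy.stats import pearsonr, spearmanr

rng = np.random.default_rng(0)
emb1 = rng.normal(size=(8, 4))   # one row per first sentence in a pair
emb2 = rng.normal(size=(8, 4))   # one row per second sentence
gold = rng.uniform(0, 5, size=8)  # stand-in gold similarity scores

emb1 /= np.linalg.norm(emb1, axis=1, keepdims=True)
emb2 /= np.linalg.norm(emb2, axis=1, keepdims=True)
cos = (emb1 * emb2).sum(axis=1)

print("pearson: ", pearsonr(gold, cos)[0])
print("spearman:", spearmanr(gold, cos)[0])
```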
a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SyntecRetrieval.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SyntecRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..e9292a725cdb59349c0c3fff16a3049dae3e7a9d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/SyntecRetrieval.json @@ -0,0 +1,43 @@ +{ + "dataset_revision": "19661ccdca4dfc2d15122d776b61685f48c68ca9", + "mteb_dataset_name": "SyntecRetrieval", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 2.87, + "map_at_1": 0.49, + "map_at_10": 0.61879, + "map_at_100": 0.62423, + "map_at_1000": 0.62423, + "map_at_20": 0.62298, + "map_at_3": 0.58, + "map_at_5": 0.606, + "mrr_at_1": 0.49, + "mrr_at_10": 0.61879, + "mrr_at_100": 0.62423, + "mrr_at_1000": 0.62423, + "mrr_at_20": 0.62298, + "mrr_at_3": 0.58, + "mrr_at_5": 0.606, + "ndcg_at_1": 0.49, + "ndcg_at_10": 0.68618, + "ndcg_at_100": 0.70927, + "ndcg_at_1000": 0.70927, + "ndcg_at_20": 0.7014, + "ndcg_at_3": 0.60833, + "ndcg_at_5": 0.65439, + "precision_at_1": 0.49, + "precision_at_10": 0.09, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.048, + "precision_at_3": 0.23, + "precision_at_5": 0.16, + "recall_at_1": 0.49, + "recall_at_10": 0.9, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 0.96, + "recall_at_3": 0.69, + "recall_at_5": 0.8 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/XPQARetrieval.json b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/XPQARetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..4dae088bedcac294991903c9451c1365d734c972 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/dangvantuan/sentence-camembert-base/XPQARetrieval.json @@ -0,0 +1,45 @@ +{ + "dataset_revision": "c99d599f0a6ab9b85b065da6f9d94f9cf731679f", + "mteb_dataset_name": "XPQARetrieval", + "mteb_version": "1.6.16", + "test": { + "evaluation_time": 12.91, + "fr": { + "map_at_1": 0.32708, + "map_at_10": 0.51719, + "map_at_100": 0.53311, + "map_at_1000": 0.5339, + "map_at_20": 0.52762, + "map_at_3": 0.46137, + "map_at_5": 0.49696, + "mrr_at_1": 0.53138, + "mrr_at_10": 0.60851, + "mrr_at_100": 0.61512, + "mrr_at_1000": 0.61538, + "mrr_at_20": 0.61274, + "mrr_at_3": 0.59168, + "mrr_at_5": 0.60196, + "ndcg_at_1": 0.53138, + "ndcg_at_10": 0.57916, + "ndcg_at_100": 0.6326, + "ndcg_at_1000": 0.64944, + "ndcg_at_20": 0.60535, + "ndcg_at_3": 0.53378, + "ndcg_at_5": 0.54916, + "precision_at_1": 0.53138, + "precision_at_10": 0.13765, + "precision_at_100": 0.0181, + "precision_at_1000": 0.00204, + "precision_at_20": 0.0777, + "precision_at_3": 0.33066, + "precision_at_5": 0.23765, + "recall_at_1": 0.32708, + "recall_at_10": 0.66028, + "recall_at_100": 0.8652, + "recall_at_1000": 0.98215, + "recall_at_20": 0.74551, + "recall_at_3": 0.51522, + "recall_at_5": 0.59137 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/HinDialectClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/HinDialectClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..58476c5425802148a5918af4fc8c207de5aa4325 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/HinDialectClassification.json @@ 
-0,0 +1,13 @@ +{ + "dataset_revision": "944a44cf93932ce62b51e7c07d44d8cc03d6bcae", + "mteb_dataset_name": "HinDialectClassification", + "mteb_version": "1.7.5", + "test": { + "accuracy": 0.5381944444444444, + "accuracy_stderr": 0.04503527102809611, + "evaluation_time": 920.79, + "f1": 0.34483787104216207, + "f1_stderr": 0.01786957063101452, + "main_score": 0.5381944444444444 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/MalayalamNewsClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/MalayalamNewsClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..7e38b7ba724a2ae7e17d4cf2a4067767552263a5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/MalayalamNewsClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "666f63bba2387456d8f846ea4d0565181bd47b81", + "mteb_dataset_name": "MalayalamNewsClassification", + "mteb_version": "1.7.5", + "test": { + "accuracy": 0.7373809523809524, + "accuracy_stderr": 0.046805820497578074, + "evaluation_time": 55.95, + "f1": 0.7369002563401076, + "f1_stderr": 0.04735601833128083, + "main_score": 0.7373809523809524 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/MarathiNewsClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/MarathiNewsClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..60ef5022235e17cf740d60320e8d18d3514603ee --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/MarathiNewsClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "7640cf8132cca1f99995ac71512a670e3c965cf1", + "mteb_dataset_name": "MarathiNewsClassification", + "mteb_version": "1.7.5", + "test": { + "accuracy": 0.695654296875, + "accuracy_stderr": 0.04901820911020044, + "evaluation_time": 27.57, + "f1": 0.6433506902803117, + "f1_stderr": 0.04811772224336286, + "main_score": 0.695654296875 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/OdiaNewsClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/OdiaNewsClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..613c1ed6752b24cc7764361e5695f980317a14bb --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/OdiaNewsClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "ffb8a34c9637fb20256e8c7be02504d16af4bd6b", + "mteb_dataset_name": "OdiaNewsClassification", + "mteb_version": "1.7.5", + "test": { + "accuracy": 0.78408203125, + "accuracy_stderr": 0.0251706971568395, + "evaluation_time": 35.36, + "f1": 0.7865500745693408, + "f1_stderr": 0.022387290242092298, + "main_score": 0.78408203125 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/SadeemQuestionRetrieval.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/SadeemQuestionRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..aed283f0aba70e459df96b6164af58f18fdd5c08 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/SadeemQuestionRetrieval.json @@ -0,0 +1,43 @@ +{ + "dataset_revision": 
"3cb0752b182e5d5d740df547748b06663c8e0bd9", + "mteb_dataset_name": "SadeemQuestionRetrieval", + "mteb_version": "1.8.3", + "test": { + "evaluation_time": 67.58, + "map_at_1": 0.32408, + "map_at_10": 0.58825, + "map_at_100": 0.58921, + "map_at_1000": 0.58925, + "map_at_20": 0.58904, + "map_at_3": 0.5809, + "map_at_5": 0.58657, + "mrr_at_1": 0.30302, + "mrr_at_10": 0.57539, + "mrr_at_100": 0.57635, + "mrr_at_1000": 0.57638, + "mrr_at_20": 0.57618, + "mrr_at_3": 0.56909, + "mrr_at_5": 0.57364, + "ndcg_at_1": 0.32408, + "ndcg_at_10": 0.68539, + "ndcg_at_100": 0.68952, + "ndcg_at_1000": 0.69052, + "ndcg_at_20": 0.68821, + "ndcg_at_3": 0.67141, + "ndcg_at_5": 0.68144, + "precision_at_1": 0.32408, + "precision_at_10": 0.09713, + "precision_at_100": 0.00989, + "precision_at_1000": 0.001, + "precision_at_20": 0.04911, + "precision_at_3": 0.31179, + "precision_at_5": 0.19186, + "recall_at_1": 0.32408, + "recall_at_10": 0.97128, + "recall_at_100": 0.98947, + "recall_at_1000": 0.99761, + "recall_at_20": 0.98229, + "recall_at_3": 0.93538, + "recall_at_5": 0.95931 + } + } \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/SouthAfricanLangClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/SouthAfricanLangClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..35f1a98e76131e5c3bbef850e3b7a96e90ec322e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/SouthAfricanLangClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "5ccda92ffd7e74fa91fed595a1cbcff1bb68ec2d", + "mteb_dataset_name": "SouthAfricanLangClassification", + "mteb_version": "1.7.5", + "test": { + "accuracy": 0.5078125, + "accuracy_stderr": 0.020070675904164264, + "evaluation_time": 113.83, + "f1": 0.49185660745939136, + "f1_stderr": 0.02035992679009311, + "main_score": 0.5078125 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TamilNewsClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TamilNewsClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..0876ace5a2cf7aaa02fd2adaa0f5c0ecb8437ddd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TamilNewsClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "bb34dd6690cf17aa731d75d45388c5801b8c4e4b", + "mteb_dataset_name": "TamilNewsClassification", + "mteb_version": "1.7.5", + "test": { + "accuracy": 0.36669921875, + "accuracy_stderr": 0.02951659146569823, + "evaluation_time": 30.84, + "f1": 0.371458957905607, + "f1_stderr": 0.026955143258914037, + "main_score": 0.36669921875 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TeluguAndhraJyotiNewsClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TeluguAndhraJyotiNewsClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..198bddebfdc66935d1315a99e7203fc15dfb1162 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TeluguAndhraJyotiNewsClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "3821aa93aa461c9263071e0897234e8d775ad616", + "mteb_dataset_name": "TeluguAndhraJyotiNewsClassification", + "mteb_version": "1.7.5", + "test": { + "accuracy": 0.789599609375, + 
"accuracy_stderr": 0.038885371109637475, + "evaluation_time": 485.33, + "f1": 0.7635426709360258, + "f1_stderr": 0.03810103697621232, + "main_score": 0.789599609375 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TswanaNewsClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TswanaNewsClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..022dd6bdb95fed29929f124ba9189a4bf6e2da02 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat/multilingual-e5-small/TswanaNewsClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "061ca1525717eebaaa9bada240f6cbb31eb3aa87", + "mteb_dataset_name": "TswanaNewsClassification", + "mteb_version": "1.8.6", + "test": { + "accuracy": 0.39281314168377823, + "accuracy_stderr": 0.029671990786941653, + "evaluation_time": 35.91, + "f1": 0.3802380491644858, + "f1_stderr": 0.02700386607738882, + "main_score": 0.39281314168377823 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/Core17InstructionRetrieval.json b/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/Core17InstructionRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..01343e6a155a8cc886a51e89e09416391685516e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/Core17InstructionRetrieval.json @@ -0,0 +1,86 @@ +{ + "dataset_revision": "e783a88ec6bc4bbdc1ba998edd85edeaf3d820f7", + "mteb_dataset_name": "Core17InstructionRetrieval", + "mteb_version": "1.6.11", + "test": { + "evaluation_time": 62.46, + "individual": { + "base": {}, + "changed": { + "map_at_1": 0.01371, + "map_at_10": 0.08184, + "map_at_100": 0.20067, + "map_at_1000": 0.26504, + "map_at_20": 0.11977, + "map_at_3": 0.03475, + "map_at_5": 0.04608, + "mrr_at_1": 0.55, + "mrr_at_10": 0.67125, + "mrr_at_100": 0.67125, + "mrr_at_1000": 0.67125, + "mrr_at_20": 0.67125, + "mrr_at_3": 0.63333, + "mrr_at_5": 0.64333, + "ndcg_at_1": 0.45, + "ndcg_at_10": 0.37635, + "ndcg_at_100": 0.3883, + "ndcg_at_1000": 0.63777, + "ndcg_at_20": 0.35579, + "ndcg_at_3": 0.43673, + "ndcg_at_5": 0.38384, + "precision_at_1": 0.55, + "precision_at_10": 0.44, + "precision_at_100": 0.2, + "precision_at_1000": 0.0545, + "precision_at_20": 0.3675, + "precision_at_3": 0.5, + "precision_at_5": 0.43, + "recall_at_1": 0.01371, + "recall_at_10": 0.10987, + "recall_at_100": 0.39011, + "recall_at_1000": 1.0, + "recall_at_20": 0.16611, + "recall_at_3": 0.03724, + "recall_at_5": 0.0515 + }, + "original": { + "map_at_1": 0.0046, + "map_at_10": 0.04401, + "map_at_100": 0.11021, + "map_at_1000": 0.14877, + "map_at_20": 0.05942, + "map_at_3": 0.01273, + "map_at_5": 0.02006, + "mrr_at_1": 0.15, + "mrr_at_10": 0.2977, + "mrr_at_100": 0.3059, + "mrr_at_1000": 0.3059, + "mrr_at_20": 0.30103, + "mrr_at_3": 0.21667, + "mrr_at_5": 0.25417, + "ndcg_at_1": 0.15, + "ndcg_at_10": 0.18399, + "ndcg_at_100": 0.27067, + "ndcg_at_1000": 0.49281, + "ndcg_at_20": 0.18583, + "ndcg_at_3": 0.16327, + "ndcg_at_5": 0.16505, + "precision_at_1": 0.15, + "precision_at_10": 0.22, + "precision_at_100": 0.1145, + "precision_at_1000": 0.0327, + "precision_at_20": 0.1825, + "precision_at_3": 0.18333, + "precision_at_5": 0.19, + "recall_at_1": 0.0046, + "recall_at_10": 0.08405, + "recall_at_100": 0.35413, + "recall_at_1000": 1.0, + "recall_at_20": 0.12894, + "recall_at_3": 0.01565, + "recall_at_5": 0.03152 + } + }, 
+ "p-MRR": -0.005396514029371525 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/News21InstructionRetrieval.json b/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/News21InstructionRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..0e9d3c981a4e0f354a29a33efc8f1e7fc6de12dd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/News21InstructionRetrieval.json @@ -0,0 +1,86 @@ +{ + "dataset_revision": "fae860d870141ee0f34fbab67314c65f41921943", + "mteb_dataset_name": "News21InstructionRetrieval", + "mteb_version": "1.6.11", + "test": { + "evaluation_time": 97.59, + "individual": { + "base": {}, + "changed": { + "map_at_1": 0.02579, + "map_at_10": 0.1389, + "map_at_100": 0.264, + "map_at_1000": 0.30462, + "map_at_20": 0.19711, + "map_at_3": 0.06963, + "map_at_5": 0.09229, + "mrr_at_1": 0.625, + "mrr_at_10": 0.74062, + "mrr_at_100": 0.74239, + "mrr_at_1000": 0.74239, + "mrr_at_20": 0.74062, + "mrr_at_3": 0.72396, + "mrr_at_5": 0.73021, + "ndcg_at_1": 0.35938, + "ndcg_at_10": 0.36023, + "ndcg_at_100": 0.44544, + "ndcg_at_1000": 0.58596, + "ndcg_at_20": 0.37638, + "ndcg_at_3": 0.35647, + "ndcg_at_5": 0.33994, + "precision_at_1": 0.625, + "precision_at_10": 0.46562, + "precision_at_100": 0.15688, + "precision_at_1000": 0.03391, + "precision_at_20": 0.375, + "precision_at_3": 0.57292, + "precision_at_5": 0.51875, + "recall_at_1": 0.02579, + "recall_at_10": 0.18181, + "recall_at_100": 0.50348, + "recall_at_1000": 1.0, + "recall_at_20": 0.28046, + "recall_at_3": 0.07776, + "recall_at_5": 0.10781 + }, + "original": { + "map_at_1": 0.02807, + "map_at_10": 0.12177, + "map_at_100": 0.19463, + "map_at_1000": 0.21976, + "map_at_20": 0.15485, + "map_at_3": 0.07547, + "map_at_5": 0.09118, + "mrr_at_1": 0.375, + "mrr_at_10": 0.51228, + "mrr_at_100": 0.52401, + "mrr_at_1000": 0.52401, + "mrr_at_20": 0.52013, + "mrr_at_3": 0.47396, + "mrr_at_5": 0.4974, + "ndcg_at_1": 0.22656, + "ndcg_at_10": 0.2646, + "ndcg_at_100": 0.37758, + "ndcg_at_1000": 0.49267, + "ndcg_at_20": 0.29666, + "ndcg_at_3": 0.2429, + "ndcg_at_5": 0.2419, + "precision_at_1": 0.375, + "precision_at_10": 0.2625, + "precision_at_100": 0.08781, + "precision_at_1000": 0.01916, + "precision_at_20": 0.2125, + "precision_at_3": 0.35417, + "precision_at_5": 0.3125, + "recall_at_1": 0.02807, + "recall_at_10": 0.18958, + "recall_at_100": 0.50293, + "recall_at_1000": 1.0, + "recall_at_20": 0.28077, + "recall_at_3": 0.09085, + "recall_at_5": 0.1203 + } + }, + "p-MRR": -0.02444899964735557 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/Robust04InstructionRetrieval.json b/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/Robust04InstructionRetrieval.json new file mode 100644 index 0000000000000000000000000000000000000000..d7343ac1695c3867954067d31759bf26bf7b6b3e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__e5-small-v2/Robust04InstructionRetrieval.json @@ -0,0 +1,86 @@ +{ + "dataset_revision": "d25e82647b09251f4feaaf5462c9a18445072ba6", + "mteb_dataset_name": "Robust04InstructionRetrieval", + "mteb_version": "1.6.11", + "test": { + "evaluation_time": 171.65, + "individual": { + "base": {}, + "changed": { + "map_at_1": 0.02748, + "map_at_10": 0.09657, + "map_at_100": 0.18186, + "map_at_1000": 0.22299, + "map_at_20": 0.12866, + "map_at_3": 0.05184, + "map_at_5": 0.07353, + "mrr_at_1": 0.5, + "mrr_at_10": 0.61658, 
+ "mrr_at_100": 0.62214, + "mrr_at_1000": 0.62214, + "mrr_at_20": 0.61987, + "mrr_at_3": 0.59295, + "mrr_at_5": 0.59776, + "ndcg_at_1": 0.45192, + "ndcg_at_10": 0.32644, + "ndcg_at_100": 0.37766, + "ndcg_at_1000": 0.58003, + "ndcg_at_20": 0.31784, + "ndcg_at_3": 0.37514, + "ndcg_at_5": 0.35409, + "precision_at_1": 0.5, + "precision_at_10": 0.31731, + "precision_at_100": 0.12423, + "precision_at_1000": 0.03369, + "precision_at_20": 0.25865, + "precision_at_3": 0.4359, + "precision_at_5": 0.39231, + "recall_at_1": 0.02748, + "recall_at_10": 0.13644, + "recall_at_100": 0.46149, + "recall_at_1000": 1.0, + "recall_at_20": 0.21836, + "recall_at_3": 0.05978, + "recall_at_5": 0.08836 + }, + "original": { + "map_at_1": 0.02667, + "map_at_10": 0.08334, + "map_at_100": 0.13998, + "map_at_1000": 0.16288, + "map_at_20": 0.10551, + "map_at_3": 0.0505, + "map_at_5": 0.06151, + "mrr_at_1": 0.28846, + "mrr_at_10": 0.42614, + "mrr_at_100": 0.43544, + "mrr_at_1000": 0.43562, + "mrr_at_20": 0.43204, + "mrr_at_3": 0.38141, + "mrr_at_5": 0.41026, + "ndcg_at_1": 0.25, + "ndcg_at_10": 0.22952, + "ndcg_at_100": 0.31225, + "ndcg_at_1000": 0.48125, + "ndcg_at_20": 0.24009, + "ndcg_at_3": 0.23574, + "ndcg_at_5": 0.22559, + "precision_at_1": 0.28846, + "precision_at_10": 0.19231, + "precision_at_100": 0.07346, + "precision_at_1000": 0.01981, + "precision_at_20": 0.15385, + "precision_at_3": 0.27564, + "precision_at_5": 0.23846, + "recall_at_1": 0.02667, + "recall_at_10": 0.16098, + "recall_at_100": 0.45154, + "recall_at_1000": 1.0, + "recall_at_20": 0.24099, + "recall_at_3": 0.06236, + "recall_at_5": 0.08695 + } + }, + "p-MRR": -0.0637798493711026 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CroatianSentimentClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CroatianSentimentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..697cf901219dff080373d33891b4efdb5bb55013 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CroatianSentimentClassification.json @@ -0,0 +1,25 @@ +{ + "dataset_revision": "255da5c6b54c95faf74aba6d6cad9b2e176bf90a", + "mteb_dataset_name": "CroatianSentimentClassification", + "mteb_version": "1.6.12", + "test": { + "accuracy": 0.8375286041189931, + "accuracy_stderr": 0.0478474326359181, + "ap": 0.9221356714204483, + "ap_stderr": 0.012570910821905763, + "evaluation_time": 4.07, + "f1": 0.794360657998703, + "f1_stderr": 0.04800656095207088, + "main_score": 0.8375286041189931 + }, + "validation": { + "accuracy": 0.8481308411214954, + "accuracy_stderr": 0.04010260662890943, + "ap": 0.9192735157346776, + "ap_stderr": 0.012943252123811309, + "evaluation_time": 6.14, + "f1": 0.7947534997573775, + "f1_stderr": 0.03996716633708439, + "main_score": 0.8481308411214954 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CrossLingualSemanticDiscriminationWMT19.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CrossLingualSemanticDiscriminationWMT19.json new file mode 100644 index 0000000000000000000000000000000000000000..9f729e28e530297a272c669350b5c141def11817 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CrossLingualSemanticDiscriminationWMT19.json @@ -0,0 +1,82 @@ +{ + "dataset_revision": "9627fbdb39b827ee5c066011ebe1e947cdb137bd", + "mteb_dataset_name": 
"CrossLingualSemanticDiscriminationWMT19", + "mteb_version": "1.8.0", + "test": { + "deu-fra": { + "map_at_1": 0.8683, + "map_at_10": 0.92051, + "map_at_100": 0.92051, + "map_at_1000": 0.92051, + "map_at_20": 0.92051, + "map_at_3": 0.9131, + "map_at_5": 0.9202, + "mrr_at_1": 0.8683, + "mrr_at_10": 0.92051, + "mrr_at_100": 0.92051, + "mrr_at_1000": 0.92051, + "mrr_at_20": 0.92051, + "mrr_at_3": 0.9131, + "mrr_at_5": 0.9202, + "ndcg_at_1": 0.8683, + "ndcg_at_10": 0.94046, + "ndcg_at_100": 0.94046, + "ndcg_at_1000": 0.94046, + "ndcg_at_20": 0.94046, + "ndcg_at_3": 0.92694, + "ndcg_at_5": 0.93976, + "precision_at_1": 0.8683, + "precision_at_10": 0.1, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.05, + "precision_at_3": 0.32224, + "precision_at_5": 0.19959, + "recall_at_1": 0.8683, + "recall_at_10": 1.0, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 1.0, + "recall_at_3": 0.96673, + "recall_at_5": 0.99796 + }, + "evaluation_time": 260.86, + "fra-deu": { + "map_at_1": 0.90224, + "map_at_10": 0.94336, + "map_at_100": 0.94336, + "map_at_1000": 0.94336, + "map_at_20": 0.94336, + "map_at_3": 0.93901, + "map_at_5": 0.94336, + "mrr_at_1": 0.90224, + "mrr_at_10": 0.94336, + "mrr_at_100": 0.94336, + "mrr_at_1000": 0.94336, + "mrr_at_20": 0.94336, + "mrr_at_3": 0.93901, + "mrr_at_5": 0.94336, + "ndcg_at_1": 0.90224, + "ndcg_at_10": 0.95773, + "ndcg_at_100": 0.95773, + "ndcg_at_1000": 0.95773, + "ndcg_at_20": 0.95773, + "ndcg_at_3": 0.95004, + "ndcg_at_5": 0.95773, + "precision_at_1": 0.90224, + "precision_at_10": 0.1, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.05, + "precision_at_3": 0.32722, + "precision_at_5": 0.2, + "recall_at_1": 0.90224, + "recall_at_10": 1.0, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 1.0, + "recall_at_3": 0.98167, + "recall_at_5": 1.0 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CrossLingualSemanticDiscriminationWMT21.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CrossLingualSemanticDiscriminationWMT21.json new file mode 100644 index 0000000000000000000000000000000000000000..882c583350c60b9d9c043cd189e04798b5f6851c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CrossLingualSemanticDiscriminationWMT21.json @@ -0,0 +1,82 @@ +{ + "dataset_revision": "9627fbdb39b827ee5c066011ebe1e947cdb137bd", + "mteb_dataset_name": "CrossLingualSemanticDiscriminationWMT21", + "mteb_version": "1.8.0", + "test": { + "deu-fra": { + "map_at_1": 0.79507, + "map_at_10": 0.87592, + "map_at_100": 0.87592, + "map_at_1000": 0.87592, + "map_at_20": 0.87592, + "map_at_3": 0.8632, + "map_at_5": 0.87557, + "mrr_at_1": 0.79507, + "mrr_at_10": 0.87592, + "mrr_at_100": 0.87592, + "mrr_at_1000": 0.87592, + "mrr_at_20": 0.87592, + "mrr_at_3": 0.8632, + "mrr_at_5": 0.87557, + "ndcg_at_1": 0.79507, + "ndcg_at_10": 0.90702, + "ndcg_at_100": 0.90702, + "ndcg_at_1000": 0.90702, + "ndcg_at_20": 0.90702, + "ndcg_at_3": 0.88379, + "ndcg_at_5": 0.90624, + "precision_at_1": 0.79507, + "precision_at_10": 0.1, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.05, + "precision_at_3": 0.3143, + "precision_at_5": 0.19955, + "recall_at_1": 0.79507, + "recall_at_10": 1.0, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 1.0, + "recall_at_3": 0.94289, + "recall_at_5": 0.99776 + }, + "evaluation_time": 181.0, + 
"fra-deu": { + "map_at_1": 0.85554, + "map_at_10": 0.91394, + "map_at_100": 0.91394, + "map_at_1000": 0.91394, + "map_at_20": 0.91394, + "map_at_3": 0.90687, + "map_at_5": 0.91376, + "mrr_at_1": 0.85554, + "mrr_at_10": 0.91394, + "mrr_at_100": 0.91394, + "mrr_at_1000": 0.91394, + "mrr_at_20": 0.91394, + "mrr_at_3": 0.90687, + "mrr_at_5": 0.91376, + "ndcg_at_1": 0.85554, + "ndcg_at_10": 0.93563, + "ndcg_at_100": 0.93563, + "ndcg_at_1000": 0.93563, + "ndcg_at_20": 0.93563, + "ndcg_at_3": 0.9228, + "ndcg_at_5": 0.93523, + "precision_at_1": 0.85554, + "precision_at_10": 0.1, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.05, + "precision_at_3": 0.32288, + "precision_at_5": 0.19978, + "recall_at_1": 0.85554, + "recall_at_10": 1.0, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 1.0, + "recall_at_3": 0.96865, + "recall_at_5": 0.99888 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CzechSubjectivityClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CzechSubjectivityClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..4cadac474ad6de6921123b2e0311b5eb5169f32d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/CzechSubjectivityClassification.json @@ -0,0 +1,25 @@ +{ + "dataset_revision": "e387ddf167f3eba99936cff89909ed6264f17e1f", + "mteb_dataset_name": "CzechSubjectivityClassification", + "mteb_version": "1.6.12", + "test": { + "accuracy": 0.8290000000000001, + "accuracy_stderr": 0.02445812748351761, + "ap": 0.7869781062955277, + "ap_stderr": 0.03385583660272538, + "evaluation_time": 4.92, + "f1": 0.8278784196287988, + "f1_stderr": 0.024992957553718758, + "main_score": 0.8290000000000001 + }, + "validation": { + "accuracy": 0.819, + "accuracy_stderr": 0.02668707552355632, + "ap": 0.7691718361560658, + "ap_stderr": 0.03470470857048776, + "evaluation_time": 5.7, + "f1": 0.8181910083784236, + "f1_stderr": 0.026792118474868454, + "main_score": 0.819 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/DutchBookReviewSentimentClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/DutchBookReviewSentimentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..af52be675645e18dbcf6b932d59facae39d03d99 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/DutchBookReviewSentimentClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "3f756ab4572e071eb53e887ab629f19fa747d39e", + "mteb_dataset_name": "DutchBookReviewSentimentClassification", + "mteb_version": "1.6.12", + "test": { + "accuracy": 0.6610611510791367, + "accuracy_stderr": 0.026759302033665153, + "ap": 0.6093173995126039, + "ap_stderr": 0.024994190612424715, + "evaluation_time": 22.33, + "f1": 0.6586589515246717, + "f1_stderr": 0.027130552253396244, + "main_score": 0.6610611510791367 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/HotelReviewSentimentClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/HotelReviewSentimentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..3bbad04815a903e85cea29ab2bd981428ec8c8e0 --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/HotelReviewSentimentClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "b108d2c32ee4e1f4176ea233e1a5ac17bceb9ef9", + "mteb_dataset_name": "HotelReviewSentimentClassification", + "mteb_version": "1.6.3", + "train": { + "accuracy": 0.52529296875, + "accuracy_stderr": 0.023042322776057444, + "evaluation_time": 381.93, + "f1": 0.5084084837840069, + "f1_stderr": 0.020023125484070918, + "main_score": 0.52529296875 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/IsiZuluNewsClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/IsiZuluNewsClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..1cc7cb234698296d0b3dc9dee0f04fb4ebc10306 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/IsiZuluNewsClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "55caf0e52693a1ea63b15a4980a73fc137fb862b", + "mteb_dataset_name": "IsiZuluNewsClassification", + "mteb_version": "1.6.12", + "train": { + "accuracy": 0.32273936170212764, + "accuracy_stderr": 0.01859849001717525, + "evaluation_time": 5.97, + "f1": 0.24860623758504694, + "f1_stderr": 0.011676300953201775, + "main_score": 0.32273936170212764 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/LegalBenchPC.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/LegalBenchPC.json new file mode 100644 index 0000000000000000000000000000000000000000..8a1638ad2251d3b2a55f9fd3a94adaddb1dd4b3b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/LegalBenchPC.json @@ -0,0 +1,49 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "LegalBenchPC", + "mteb_version": "1.7.7", + "test": { + "cos_sim": { + "accuracy": 0.66015625, + "accuracy_threshold": 0.7808122634887695, + "ap": 0.7262082113371351, + "f1": 0.7576150356448478, + "f1_threshold": 0.764856219291687, + "precision": 0.6268096514745308, + "recall": 0.9574119574119574 + }, + "dot": { + "accuracy": 0.66015625, + "accuracy_threshold": 0.7808123826980591, + "ap": 0.726208512243574, + "f1": 0.7576150356448478, + "f1_threshold": 0.7648563385009766, + "precision": 0.6268096514745308, + "recall": 0.9574119574119574 + }, + "euclidean": { + "accuracy": 0.66015625, + "accuracy_threshold": 0.6620992422103882, + "ap": 0.7262082113371351, + "f1": 0.7576150356448478, + "f1_threshold": 0.685775101184845, + "precision": 0.6268096514745308, + "recall": 0.9574119574119574 + }, + "evaluation_time": 267.43, + "manhattan": { + "accuracy": 0.66064453125, + "accuracy_threshold": 14.39570426940918, + "ap": 0.7293593422078586, + "f1": 0.7577437235083143, + "f1_threshold": 15.084949493408203, + "precision": 0.6294691224268689, + "recall": 0.9516789516789517 + }, + "max": { + "accuracy": 0.66064453125, + "ap": 0.7293593422078586, + "f1": 0.7577437235083143 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..1455c577fbd48df9d3abfb3da98987e382e3cc69 --- /dev/null +++ 
b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDLegalBenchClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "MAUDLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.303125, + "accuracy_stderr": 0.03594479694092072, + "evaluation_time": 2129.83, + "f1": 0.14747100659267248, + "f1_stderr": 0.017269480781280228, + "main_score": 0.303125 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDSpecificPerformanceLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDSpecificPerformanceLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..fab4688e007d73d8d6a91c00ff9988ba89f792dd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDSpecificPerformanceLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "MAUDSpecificPerformanceLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.9259259259259259, + "accuracy_stderr": 0.0, + "ap": 0.9259259259259259, + "ap_stderr": 0.0, + "evaluation_time": 219.85, + "f1": 0.4807692307692307, + "f1_stderr": 0.0, + "main_score": 0.9259259259259259 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDTailPeriodLengthLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDTailPeriodLengthLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..87c1fbd8a2ba17b37243c339d09becd353dc6fc5 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MAUDTailPeriodLengthLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "MAUDTailPeriodLengthLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.9308176100628932, + "accuracy_stderr": 1.1102230246251565e-16, + "ap": 0.06918238993710692, + "ap_stderr": 0.0, + "evaluation_time": 199.03, + "f1": 0.482084690553746, + "f1_stderr": 5.551115123125783e-17, + "main_score": 0.9308176100628932 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MalteseSentimentClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MalteseSentimentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..14a664f04ce0e1a8f2a79dfb4dad9c20c023599a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/MalteseSentimentClassification.json @@ -0,0 +1,25 @@ +{ + "dataset_revision": "fd47b916f9ebb5d27b0e583de9d2b2db39f7dda2", + "mteb_dataset_name": "MalteseSentimentClassification", + "mteb_version": "1.6.12", + "test": { + "accuracy": 0.616374269005848, + "accuracy_stderr": 0.049289608200480645, + "ap": 0.4288380220091102, + "ap_stderr": 0.028801107429921743, + "evaluation_time": 3.57, + "f1": 0.6024006236050181, + "f1_stderr": 0.03959773046123066, + "main_score": 0.616374269005848 + }, + "validation": { + "accuracy": 0.6129411764705882, + "accuracy_stderr": 0.0847467221809506, + "ap": 0.4305214991409484, + 
"ap_stderr": 0.059136739653642974, + "evaluation_time": 5.31, + "f1": 0.6013607939369721, + "f1_stderr": 0.07750106710370977, + "main_score": 0.6129411764705882 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/RestaurantReviewSentimentClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/RestaurantReviewSentimentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..6390f25e547742afefe38df8555fd257ff39d734 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/RestaurantReviewSentimentClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "d51bf2435d030e0041344f576c5e8d7154828977", + "mteb_dataset_name": "RestaurantReviewSentimentClassification", + "mteb_version": "1.6.3", + "train": { + "accuracy": 0.659228515625, + "accuracy_stderr": 0.0550964108677601, + "ap": 0.785113572000111, + "ap_stderr": 0.025344300641545324, + "evaluation_time": 549.73, + "f1": 0.6356722247835973, + "f1_stderr": 0.0467693398694135, + "main_score": 0.659228515625 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/TweetEmotionClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/TweetEmotionClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..1d9ccc0e857b44d132fd518859e57cc5d66ad1ec --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/TweetEmotionClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "0ded8ff72cc68cbb7bb5c01b0a9157982b73ddaf", + "mteb_dataset_name": "TweetEmotionClassification", + "mteb_version": "1.6.3", + "train": { + "accuracy": 0.532861328125, + "accuracy_stderr": 0.021630418484502263, + "evaluation_time": 263.41, + "f1": 0.4787353305232833, + "f1_stderr": 0.028535356399510787, + "main_score": 0.532861328125 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/TweetSarcasmClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/TweetSarcasmClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..e5924e7e18804a475a3052ce09f76b723e3468db --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/TweetSarcasmClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "557bf94ac6177cc442f42d0b09b6e4b76e8f47c9", + "mteb_dataset_name": "TweetSarcasmClassification", + "mteb_version": "1.6.3", + "test": { + "accuracy": 0.6098578199052133, + "accuracy_stderr": 0.052989427508568276, + "ap": 0.21492854478500573, + "ap_stderr": 0.02064430303688515, + "evaluation_time": 267.47, + "f1": 0.5339694484176662, + "f1_stderr": 0.034135223698889265, + "main_score": 0.6098578199052133 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/UyghurSentimentClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/UyghurSentimentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..a29af74420695c53f75d27bcdd1216ba53e91a83 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/UyghurSentimentClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": 
"f859e1cdf823b3b27aa27b0f0fd4cef1d66f4b38", + "mteb_dataset_name": "UyghurSentimentClassification", + "mteb_version": "1.6.12", + "test": { + "accuracy": 0.7832342449464923, + "accuracy_stderr": 0.08295357809350863, + "ap": 0.9501647123042005, + "ap_stderr": 0.009632335013725105, + "evaluation_time": 7.17, + "f1": 0.6751109607537314, + "f1_stderr": 0.07105414994195818, + "main_score": 0.7832342449464923 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/model_meta.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/model_meta.json new file mode 100644 index 0000000000000000000000000000000000000000..7a830bdc50ba6358dbdc428d47fc027af2ad4bd3 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-base/model_meta.json @@ -0,0 +1 @@ +{"model_name": "intfloat/multilingual-e5-base", "versions": null} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AJGT.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AJGT.json new file mode 100644 index 0000000000000000000000000000000000000000..f975a84c4b90b2bab663585d35534505efb8d3ee --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AJGT.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "af3f2fa5462ac461b696cb300d66e07ad366057f", + "mteb_dataset_name": "AJGT", + "mteb_version": "1.7.39", + "train": { + "accuracy": 0.7455, + "accuracy_stderr": 0.036028495238294406, + "ap": 0.6896375988988064, + "ap_stderr": 0.039844718636412035, + "evaluation_time": 35.8, + "f1": 0.7445024649282719, + "f1_stderr": 0.03608961520036871, + "main_score": 0.7455 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AfriSentiClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AfriSentiClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..192d760b584669b4634f805ec3a422be313430d0 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AfriSentiClassification.json @@ -0,0 +1,92 @@ +{ + "dataset_revision": "b52e930385cf5ed7f063072c3f7bd17b599a16cf", + "mteb_dataset_name": "AfriSentiClassification", + "mteb_version": "1.6.36", + "test": { + "amh": { + "accuracy": 0.4023511755877939, + "accuracy_stderr": 0.09741272692547527, + "f1": 0.35717398035656933, + "f1_stderr": 0.05822676290939817, + "main_score": 0.4023511755877939 + }, + "arq": { + "accuracy": 0.44415448851774536, + "accuracy_stderr": 0.04926499839587244, + "f1": 0.40949437468527294, + "f1_stderr": 0.037032510359058204, + "main_score": 0.44415448851774536 + }, + "ary": { + "accuracy": 0.4451171875, + "accuracy_stderr": 0.06052214086524128, + "f1": 0.4306871550490598, + "f1_stderr": 0.06206643822520438, + "main_score": 0.4451171875 + }, + "evaluation_time": 3735.44, + "hau": { + "accuracy": 0.72666015625, + "accuracy_stderr": 0.06551955698424791, + "f1": 0.3817245217083026, + "f1_stderr": 0.05076236699659615, + "main_score": 0.72666015625 + }, + "ibo": { + "accuracy": 0.46474609375, + "accuracy_stderr": 0.035738585243679226, + "f1": 0.31494100575316597, + "f1_stderr": 0.03484975123316935, + "main_score": 0.46474609375 + }, + "kin": { + "accuracy": 0.4664717348927875, + "accuracy_stderr": 0.022900128157944896, + "f1": 0.46396142791797523, + "f1_stderr": 0.02521199036981062, + 
"main_score": 0.4664717348927875 + }, + "pcm": { + "accuracy": 0.383544921875, + "accuracy_stderr": 0.04474172543366532, + "f1": 0.30625040675173204, + "f1_stderr": 0.027452280984405636, + "main_score": 0.383544921875 + }, + "por": { + "accuracy": 0.420703125, + "accuracy_stderr": 0.0698143841413231, + "f1": 0.40518646763598765, + "f1_stderr": 0.047767693430091566, + "main_score": 0.420703125 + }, + "swa": { + "accuracy": 0.4189839572192514, + "accuracy_stderr": 0.03792269658073026, + "f1": 0.3892334808550868, + "f1_stderr": 0.0367701326461163, + "main_score": 0.4189839572192514 + }, + "tso": { + "accuracy": 0.35078740157480315, + "accuracy_stderr": 0.023321579873417143, + "f1": 0.3222336006004757, + "f1_stderr": 0.013080415572393108, + "main_score": 0.35078740157480315 + }, + "twi": { + "accuracy": 0.3742887249736565, + "accuracy_stderr": 0.023884377772825607, + "f1": 0.3538491286462195, + "f1_stderr": 0.0231346705428414, + "main_score": 0.3742887249736565 + }, + "yor": { + "accuracy": 0.284716796875, + "accuracy_stderr": 0.15692169011374915, + "f1": 0.18208390588236606, + "f1_stderr": 0.07151119825120113, + "main_score": 0.284716796875 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AfriSentiLangClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AfriSentiLangClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..69bc47ad9585a3bab2e50ffa31389b670f5ff550 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AfriSentiLangClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "f17cb5f3ec522ac604601fd09db9fd644ac66ca5", + "mteb_dataset_name": "AfriSentiLangClassification", + "mteb_version": "1.7.27", + "test": { + "accuracy": 0.681982421875, + "accuracy_stderr": 0.017141453878552912, + "evaluation_time": 19.8, + "f1": 0.6560546800984801, + "f1_stderr": 0.017047608952243772, + "main_score": 0.681982421875 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringP2P.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringP2P.json new file mode 100644 index 0000000000000000000000000000000000000000..681ebd0ca3fdd124cad1eb885b31dbec6f9762f1 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringP2P.json @@ -0,0 +1,32 @@ +{ + "dataset_revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "evaluation_time": 44.24563717842102, + "kg_co2_emissions": null, + "mteb_version": "1.11.17", + "scores": { + "test": [ + { + "hf_subset": "default", + "languages": [ + "fra-Latn" + ], + "main_score": 0.6088563017856843, + "v_measure": 0.6088563017856843, + "v_measure_std": 0.021933990097708713, + "v_measures": [ + 0.6170502194490486, + 0.6171210109988788, + 0.5853464395898385, + 0.6162887184201721, + 0.6193773282993238, + 0.5755214824748281, + 0.6470272701271494, + 0.5742622118440345, + 0.6209444012185718, + 0.6156239354349972 + ] + } + ] + }, + "task_name": "AlloProfClusteringP2P" +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringS2S.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringS2S.json new file mode 100644 index 
0000000000000000000000000000000000000000..e0c4e7264344982ede73b633bec27232b70ab848 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringS2S.json @@ -0,0 +1,32 @@ +{ + "dataset_revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "evaluation_time": 6.473612070083618, + "kg_co2_emissions": null, + "mteb_version": "1.11.17", + "scores": { + "test": [ + { + "hf_subset": "default", + "languages": [ + "fra-Latn" + ], + "main_score": 0.32521590908131975, + "v_measure": 0.32521590908131975, + "v_measure_std": 0.04261122855800503, + "v_measures": [ + 0.3331079965581598, + 0.3548199947531167, + 0.2947004104713659, + 0.40719295974748043, + 0.37688686217343476, + 0.33482040213006, + 0.30744121040473155, + 0.2660392746267603, + 0.30086293127942865, + 0.2762870486686597 + ] + } + ] + }, + "task_name": "AlloProfClusteringS2S" +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringS2S.v2.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringS2S.v2.json new file mode 100644 index 0000000000000000000000000000000000000000..97cac33ede09d2f167189d8ad7ea7d7955df58ab --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloProfClusteringS2S.v2.json @@ -0,0 +1,33 @@ +{ + "dataset_revision": "392ba3f5bcc8c51f578786c1fc3dae648662cb9b", + "evaluation_time": 6.610172271728516, + "kg_co2_emissions": null, + "mteb_version": "1.11.17", + "scores": { + "test": [ + { + "hf_subset": "default", + "languages": [ + "fra-Latn" + ], + "main_score": 0.31782928576651176, + "v_measure": 0.31782928576651176, + "v_measures": { + "Level 0": [ + 0.3454717761185842, + 0.31308057407437195, + 0.2920199127081912, + 0.3384697045982106, + 0.3034449503952685, + 0.36458223933413747, + 0.2823031636094184, + 0.3215270427388991, + 0.31542021883689064, + 0.30197327525114565 + ] + } + } + ] + }, + "task_name": "AlloProfClusteringS2S.v2" +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloprofReranking.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloprofReranking.json new file mode 100644 index 0000000000000000000000000000000000000000..d07178ea3ed52da1e9593fb9ba6421a2d24f38ef --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/AlloprofReranking.json @@ -0,0 +1,20 @@ +{ + "dataset_revision": "65393d0d7a08a10b4e348135e824f385d420b0fd", + "evaluation_time": 14.704835414886475, + "kg_co2_emissions": null, + "mteb_version": "1.11.16", + "scores": { + "test": [ + { + "hf_subset": "default", + "languages": [ + "fra-Latn" + ], + "main_score": 0.6527671789828227, + "map": 0.6527671789828227, + "mrr": 0.6673253008745237 + } + ] + }, + "task_name": "AlloprofReranking" +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/ArEntail.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/ArEntail.json new file mode 100644 index 0000000000000000000000000000000000000000..51d960b6ff1b3de44ad6b26067dfe76f8f179b2e --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/ArEntail.json @@ -0,0 +1,49 @@ +{ + "dataset_revision": "4da4316c6e3287746ab74ff67dd252ad128fceff", + "mteb_dataset_name": "ArEntail", + "mteb_version": "1.8.10", + "test": { + "cos_sim": { + 
"accuracy": 0.734, + "accuracy_threshold": 0.9073306322097778, + "ap": 0.7938231998979896, + "f1": 0.7550675675675675, + "f1_threshold": 0.8919888734817505, + "precision": 0.6535087719298246, + "recall": 0.894 + }, + "dot": { + "accuracy": 0.734, + "accuracy_threshold": 0.9073307514190674, + "ap": 0.7938231998979896, + "f1": 0.7550675675675675, + "f1_threshold": 0.8919887542724609, + "precision": 0.6535087719298246, + "recall": 0.894 + }, + "euclidean": { + "accuracy": 0.734, + "accuracy_threshold": 0.43050986528396606, + "ap": 0.7938231998979896, + "f1": 0.7550675675675675, + "f1_threshold": 0.464781790971756, + "precision": 0.6535087719298246, + "recall": 0.894 + }, + "evaluation_time": 4.4, + "manhattan": { + "accuracy": 0.737, + "accuracy_threshold": 6.7003984451293945, + "ap": 0.789496298019098, + "f1": 0.7522768670309654, + "f1_threshold": 6.960292816162109, + "precision": 0.6906354515050167, + "recall": 0.826 + }, + "max": { + "accuracy": 0.737, + "ap": 0.7938231998979896, + "f1": 0.7550675675675675 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/ArXivHierarchicalClusteringP2P.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/ArXivHierarchicalClusteringP2P.json new file mode 100644 index 0000000000000000000000000000000000000000..38a7ee9d5776e6dd509b84b2d972b27c66d266b8 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/ArXivHierarchicalClusteringP2P.json @@ -0,0 +1,45 @@ +{ + "dataset_revision": "0bbdb47bcbe3a90093699aefeed338a0f28a7ee8", + "evaluation_time": 148.84170842170715, + "kg_co2_emissions": null, + "mteb_version": "1.11.6", + "scores": { + "test": [ + { + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.39941294299601454, + "v_measure": 0.39941294299601454, + "v_measures": { + "Level 0": [ + 0.4196898784897407, + 0.4008535729096411, + 0.426880339670921, + 0.4077233280311405, + 0.44643944689857523, + 0.41474493104582166, + 0.42517616958361126, + 0.397142177403971, + 0.4212660634498137, + 0.4035371154939677 + ], + "Level 1": [ + 0.37867716327953904, + 0.3961882371364308, + 0.3867532102268762, + 0.3652978631407815, + 0.3857803447101778, + 0.39408456920962986, + 0.3751251123646474, + 0.38192026869306556, + 0.37893404775574907, + 0.3820450204261899 + ] + } + } + ] + }, + "task_name": "ArXivHierarchicalClusteringP2P" +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/BengaliHateSpeechClassification.json b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/BengaliHateSpeechClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..f872efbd49fd0c7234d422c64a9c27d15e34f29b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/results/intfloat__multilingual-e5-small/BengaliHateSpeechClassification.json @@ -0,0 +1,13 @@ +{ + "dataset_revision": "99612296bc093f0720cac7d7cbfcb67eecf1ca2f", + "mteb_dataset_name": "BengaliHateSpeechClassification", + "mteb_version": "1.7.11", + "train": { + "accuracy": 0.579931640625, + "accuracy_stderr": 0.0310817384762391, + "evaluation_time": 9.58, + "f1": 0.492375715944452, + "f1_stderr": 0.032669816416586527, + "main_score": 0.492375715944452 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/CLSClusteringS2S.v2.json 
b/testbed/embeddings-benchmark__mteb/tests/historic_results/CLSClusteringS2S.v2.json new file mode 100644 index 0000000000000000000000000000000000000000..9ae7d23a1f0ec754a35a0f7b855437adffea49e1 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/CLSClusteringS2S.v2.json @@ -0,0 +1,23 @@ +{ + "dataset_revision": "e458b3f5414b62b7f9f83499ac1f5497ae2e869f", + "mteb_dataset_name": "CLSClusteringS2S.v2", + "mteb_version": "1.8.3", + "test": { + "evaluation_time": 2.49, + "v_measure": 0.37315547200856214, + "v_measures": { + "Level 0": [ + 0.3742726381494557, + 0.40959148625703823, + 0.36965063530756526, + 0.338471841244639, + 0.36357288846492314, + 0.3609549199748587, + 0.4109703244985918, + 0.3746561872473064, + 0.36891177878950393, + 0.3605020201517393 + ] + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/CSFDCZMovieReviewSentimentClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/CSFDCZMovieReviewSentimentClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..0b9ca7f9f069d86cf693d2ff9ec0b4102bc8f6fd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/CSFDCZMovieReviewSentimentClassification.json @@ -0,0 +1,53 @@ +{ + "dataset_revision": "dd2ede6faaea338ef6b1e2966f06808656975a23", + "mteb_dataset_name": "CSFDCZMovieReviewSentimentClassification", + "mteb_version": "1.9.0", + "test": { + "accuracy": 0.2755859375, + "evaluation_time": 572.32, + "f1": 0.2618690642990439, + "main_score": 0.2755859375, + "scores_per_experiment": [ + { + "accuracy": 0.267578125, + "f1": 0.24692410982804047 + }, + { + "accuracy": 0.27783203125, + "f1": 0.2652829520485363 + }, + { + "accuracy": 0.28369140625, + "f1": 0.27520570520211146 + }, + { + "accuracy": 0.2744140625, + "f1": 0.25425181877634134 + }, + { + "accuracy": 0.2626953125, + "f1": 0.25320739340224707 + }, + { + "accuracy": 0.2734375, + "f1": 0.2605558770195037 + }, + { + "accuracy": 0.29052734375, + "f1": 0.2835394143031402 + }, + { + "accuracy": 0.2802734375, + "f1": 0.269608815218915 + }, + { + "accuracy": 0.2626953125, + "f1": 0.23963384925117245 + }, + { + "accuracy": 0.28271484375, + "f1": 0.2704807079404303 + } + ] + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIConfidentialityOfAgreementLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIConfidentialityOfAgreementLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..83c78b9682a6568ca32452496fec734efa22773a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIConfidentialityOfAgreementLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLIConfidentialityOfAgreementLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.6341463414634145, + "accuracy_stderr": 1.1102230246251565e-16, + "ap": 0.5870138431114039, + "ap_stderr": 1.1102230246251565e-16, + "evaluation_time": 25.85, + "f1": 0.6332737030411447, + "f1_stderr": 1.1102230246251565e-16, + "main_score": 0.6341463414634145 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIInclusionOfVerballyConveyedInformationLegalBenchClassification.json 
b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIInclusionOfVerballyConveyedInformationLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..9c16d2fcfb6b2ac5b4e17e9a7d554a061edc3f17 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIInclusionOfVerballyConveyedInformationLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLIInclusionOfVerballyConveyedInformationLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.3309352517985612, + "accuracy_stderr": 5.551115123125783e-17, + "ap": 0.45216221004044926, + "ap_stderr": 5.551115123125783e-17, + "evaluation_time": 29.48, + "f1": 0.302637967308626, + "f1_stderr": 0.0, + "main_score": 0.3309352517985612 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLINoticeOnCompelledDisclosureLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLINoticeOnCompelledDisclosureLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..7eacf73ac6c197a40c20143cff69caa2b3b79afd --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLINoticeOnCompelledDisclosureLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLINoticeOnCompelledDisclosureLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.8873239436619718, + "accuracy_stderr": 1.1102230246251565e-16, + "ap": 0.8627319472389894, + "ap_stderr": 1.1102230246251565e-16, + "evaluation_time": 27.87, + "f1": 0.886965174129353, + "f1_stderr": 1.1102230246251565e-16, + "main_score": 0.8873239436619718 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleAcquirementOfSimilarInformationLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleAcquirementOfSimilarInformationLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..84fb7af656f2cc3cae0d382f7480eeee2fcda8fc --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleAcquirementOfSimilarInformationLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLIPermissibleAcquirementOfSimilarInformationLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.6966292134831462, + "accuracy_stderr": 1.1102230246251565e-16, + "ap": 0.6238035788597587, + "ap_stderr": 0.0, + "evaluation_time": 25.43, + "f1": 0.6749188311688312, + "f1_stderr": 0.0, + "main_score": 0.6966292134831462 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleCopyLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleCopyLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..2ce5b349c4157fb39e52638dc23b515012c01129 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleCopyLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + 
"dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLIPermissibleCopyLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.367816091954023, + "accuracy_stderr": 0.0, + "ap": 0.1894199099280769, + "ap_stderr": 0.0, + "evaluation_time": 23.69, + "f1": 0.3555555555555555, + "f1_stderr": 0.0, + "main_score": 0.367816091954023 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleDevelopmentOfSimilarInformationLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleDevelopmentOfSimilarInformationLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..cfd6b914c8fcf29c226934c53fff059e2bfc82e9 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissibleDevelopmentOfSimilarInformationLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLIPermissibleDevelopmentOfSimilarInformationLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.8602941176470589, + "accuracy_stderr": 0.0, + "ap": 0.7918838421444526, + "ap_stderr": 1.1102230246251565e-16, + "evaluation_time": 27.74, + "f1": 0.8593741496598639, + "f1_stderr": 0.0, + "main_score": 0.8602941176470589 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissiblePostAgreementPossessionLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissiblePostAgreementPossessionLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..abb2f8aa78035334294cc1996f19f2890f581050 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIPermissiblePostAgreementPossessionLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLIPermissiblePostAgreementPossessionLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.4414414414414415, + "accuracy_stderr": 5.551115123125783e-17, + "ap": 0.27566577566577566, + "ap_stderr": 0.0, + "evaluation_time": 28.5, + "f1": 0.4403057905009759, + "f1_stderr": 0.0, + "main_score": 0.4414414414414415 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIReturnOfConfidentialInformationLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIReturnOfConfidentialInformationLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..002f448d48c93b18b3596a894e445e6d2020c17c --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLIReturnOfConfidentialInformationLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLIReturnOfConfidentialInformationLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.8484848484848484, + "accuracy_stderr": 1.1102230246251565e-16, + "ap": 0.7876716382575759, + "ap_stderr": 1.1102230246251565e-16, + "evaluation_time": 19.45, + "f1": 0.8483455882352942, + "f1_stderr": 0.0, + "main_score": 0.8484848484848484 + } 
+} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISharingWithEmployeesLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISharingWithEmployeesLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..6de605c1aa51212c99bfb83c845aae7ea33baddc --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISharingWithEmployeesLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLISharingWithEmployeesLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.8176470588235294, + "accuracy_stderr": 0.0, + "ap": 0.7551238947424155, + "ap_stderr": 0.0, + "evaluation_time": 29.9, + "f1": 0.8153404113669014, + "f1_stderr": 0.0, + "main_score": 0.8176470588235294 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISharingWithThirdPartiesLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISharingWithThirdPartiesLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..73dfff899f980369644529cdcf7520f41d65aa26 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISharingWithThirdPartiesLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLISharingWithThirdPartiesLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.6166666666666668, + "accuracy_stderr": 1.1102230246251565e-16, + "ap": 0.4478090766823161, + "ap_stderr": 5.551115123125783e-17, + "evaluation_time": 37.36, + "f1": 0.5859861995399847, + "f1_stderr": 0.0, + "main_score": 0.6166666666666668 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISurvivalOfObligationsLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISurvivalOfObligationsLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..e40d453051be9db64be6a3f9baa076428b3c32d9 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/ContractNLISurvivalOfObligationsLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": "ContractNLISurvivalOfObligationsLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.5605095541401274, + "accuracy_stderr": 0.0, + "ap": 0.5070174501318803, + "ap_stderr": 0.0, + "evaluation_time": 20.81, + "f1": 0.5602240896358543, + "f1_stderr": 0.0, + "main_score": 0.5605095541401274 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/CorporateLobbyingLegalBenchClassification.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/CorporateLobbyingLegalBenchClassification.json new file mode 100644 index 0000000000000000000000000000000000000000..9dcd7b709ae6ebaf55bdeedd960e78cbba23cb05 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/CorporateLobbyingLegalBenchClassification.json @@ -0,0 +1,15 @@ +{ + "dataset_revision": "12ca3b695563788fead87a982ad1a068284413f4", + "mteb_dataset_name": 
"CorporateLobbyingLegalBenchClassification", + "mteb_version": "1.7.7", + "test": { + "accuracy": 0.7040816326530612, + "accuracy_stderr": 0.0, + "ap": 0.2959183673469388, + "ap_stderr": 5.551115123125783e-17, + "evaluation_time": 288.63, + "f1": 0.4131736526946107, + "f1_stderr": 5.551115123125783e-17, + "main_score": 0.7040816326530612 + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/CrossLingualSemanticDiscriminationWMT19.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/CrossLingualSemanticDiscriminationWMT19.json new file mode 100644 index 0000000000000000000000000000000000000000..f4e20d87c0ae2015e51cc31a1ddb4e51bb1bb723 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/CrossLingualSemanticDiscriminationWMT19.json @@ -0,0 +1,82 @@ +{ + "dataset_revision": "9627fbdb39b827ee5c066011ebe1e947cdb137bd", + "mteb_dataset_name": "CrossLingualSemanticDiscriminationWMT19", + "mteb_version": "1.8.0", + "test": { + "deu-fra": { + "map_at_1": 0.79973, + "map_at_10": 0.87665, + "map_at_100": 0.87676, + "map_at_1000": 0.87676, + "map_at_20": 0.87671, + "map_at_3": 0.86558, + "map_at_5": 0.87576, + "mrr_at_1": 0.79905, + "mrr_at_10": 0.87631, + "mrr_at_100": 0.87642, + "mrr_at_1000": 0.87642, + "mrr_at_20": 0.87637, + "mrr_at_3": 0.86524, + "mrr_at_5": 0.87542, + "ndcg_at_1": 0.79973, + "ndcg_at_10": 0.90665, + "ndcg_at_100": 0.9073, + "ndcg_at_1000": 0.9073, + "ndcg_at_20": 0.90684, + "ndcg_at_3": 0.88606, + "ndcg_at_5": 0.90461, + "precision_at_1": 0.79973, + "precision_at_10": 0.09966, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.04986, + "precision_at_3": 0.315, + "precision_at_5": 0.1981, + "recall_at_1": 0.79973, + "recall_at_10": 0.99661, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 0.99728, + "recall_at_3": 0.94501, + "recall_at_5": 0.9905 + }, + "evaluation_time": 123.3, + "fra-deu": { + "map_at_1": 0.83978, + "map_at_10": 0.90748, + "map_at_100": 0.90748, + "map_at_1000": 0.90748, + "map_at_20": 0.90748, + "map_at_3": 0.90179, + "map_at_5": 0.90725, + "mrr_at_1": 0.83978, + "mrr_at_10": 0.90748, + "mrr_at_100": 0.90748, + "mrr_at_1000": 0.90748, + "mrr_at_20": 0.90748, + "mrr_at_3": 0.90179, + "mrr_at_5": 0.90725, + "ndcg_at_1": 0.83978, + "ndcg_at_10": 0.93098, + "ndcg_at_100": 0.93098, + "ndcg_at_1000": 0.93098, + "ndcg_at_20": 0.93098, + "ndcg_at_3": 0.92083, + "ndcg_at_5": 0.9305, + "precision_at_1": 0.83978, + "precision_at_10": 0.1, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.05, + "precision_at_3": 0.32519, + "precision_at_5": 0.19973, + "recall_at_1": 0.83978, + "recall_at_10": 1.0, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 1.0, + "recall_at_3": 0.97556, + "recall_at_5": 0.99864 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/historic_results/CrossLingualSemanticDiscriminationWMT21.json b/testbed/embeddings-benchmark__mteb/tests/historic_results/CrossLingualSemanticDiscriminationWMT21.json new file mode 100644 index 0000000000000000000000000000000000000000..71dd81c042553e0568888e1c10e3d7bdd2717b5b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/historic_results/CrossLingualSemanticDiscriminationWMT21.json @@ -0,0 +1,82 @@ +{ + "dataset_revision": "9627fbdb39b827ee5c066011ebe1e947cdb137bd", + "mteb_dataset_name": "CrossLingualSemanticDiscriminationWMT21", + "mteb_version": "1.8.0", + "test": { + "deu-fra": { + 
"map_at_1": 0.72452, + "map_at_10": 0.82167, + "map_at_100": 0.82204, + "map_at_1000": 0.82204, + "map_at_20": 0.82202, + "map_at_3": 0.8003, + "map_at_5": 0.81967, + "mrr_at_1": 0.72452, + "mrr_at_10": 0.82167, + "mrr_at_100": 0.82204, + "mrr_at_1000": 0.82204, + "mrr_at_20": 0.82202, + "mrr_at_3": 0.8003, + "mrr_at_5": 0.81967, + "ndcg_at_1": 0.72452, + "ndcg_at_10": 0.86438, + "ndcg_at_100": 0.86574, + "ndcg_at_1000": 0.86574, + "ndcg_at_20": 0.86557, + "ndcg_at_3": 0.82485, + "ndcg_at_5": 0.85984, + "precision_at_1": 0.72452, + "precision_at_10": 0.09944, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.04994, + "precision_at_3": 0.29862, + "precision_at_5": 0.19619, + "recall_at_1": 0.72452, + "recall_at_10": 0.9944, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 0.99888, + "recall_at_3": 0.89586, + "recall_at_5": 0.98096 + }, + "evaluation_time": 81.86, + "fra-deu": { + "map_at_1": 0.80067, + "map_at_10": 0.87871, + "map_at_100": 0.87871, + "map_at_1000": 0.87871, + "map_at_20": 0.87871, + "map_at_3": 0.86805, + "map_at_5": 0.87824, + "mrr_at_1": 0.80067, + "mrr_at_10": 0.87871, + "mrr_at_100": 0.87871, + "mrr_at_1000": 0.87871, + "mrr_at_20": 0.87871, + "mrr_at_3": 0.86805, + "mrr_at_5": 0.87824, + "ndcg_at_1": 0.80067, + "ndcg_at_10": 0.90915, + "ndcg_at_100": 0.90915, + "ndcg_at_1000": 0.90915, + "ndcg_at_20": 0.90915, + "ndcg_at_3": 0.88987, + "ndcg_at_5": 0.90804, + "precision_at_1": 0.80067, + "precision_at_10": 0.1, + "precision_at_100": 0.01, + "precision_at_1000": 0.001, + "precision_at_20": 0.05, + "precision_at_3": 0.31766, + "precision_at_5": 0.19933, + "recall_at_1": 0.80067, + "recall_at_10": 1.0, + "recall_at_100": 1.0, + "recall_at_1000": 1.0, + "recall_at_20": 1.0, + "recall_at_3": 0.95297, + "recall_at_5": 0.99664 + } + } +} \ No newline at end of file diff --git a/testbed/embeddings-benchmark__mteb/tests/test_ClusteringEvaluator.py b/testbed/embeddings-benchmark__mteb/tests/test_ClusteringEvaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..0769a088cddb72f4d44d76524548eb994e5c4c34 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/test_ClusteringEvaluator.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +from typing import List + +import numpy as np + +from mteb.evaluation.evaluators import ClusteringEvaluator + + +class TestClusteringEvaluator: + def test_clustering_v_measure(self): + class Model: + def encode(self, sentences: List[str], batch_size=32) -> np.ndarray: + return np.eye(len(sentences)) + + model = Model() + sentences = ["dog walked home", "cat walked home", "robot walked to the park"] + clusterer = ClusteringEvaluator(sentences=sentences, labels=[1, 2, 3]) + result = clusterer(model) + + assert result == {"v_measure": 1.0} diff --git a/testbed/embeddings-benchmark__mteb/tests/test_InstructionRetrievalEvaluator.py b/testbed/embeddings-benchmark__mteb/tests/test_InstructionRetrievalEvaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..d40c0b83b95c7c952fe02976145b888ccce32fac --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/test_InstructionRetrievalEvaluator.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from mteb.evaluation.evaluators import InstructionRetrievalEvaluator, utils + + +class TestInstructionRetrievalEvaluator: + def setup_method(self): + """Setup any state tied to the execution of the given method in a class. + + setup_method is invoked for every test method of a class. 
+ """ + # checks that it loads + self.evaluator = InstructionRetrievalEvaluator.InstructionRetrievalEvaluator() + + def test_p_mrr(self): + changed_qrels = { + "a": ["0"], + } + + # these are the query: {"doc_id": score} + original_run = { + "a": {"0": 1, "1": 2, "2": 3, "3": 4}, + } + + new_run = { + "a": {"0": 1, "1": 2, "2": 3, "3": 4}, + } + + results = utils.evaluate_change( + original_run, + new_run, + changed_qrels, + ) + + assert results["p-MRR"] == 0.0 + + # test with a change + + new_run = { + "a": {"0": 4, "1": 1, "2": 2, "3": 3}, + } + + results = utils.evaluate_change( + original_run, + new_run, + changed_qrels, + ) + + assert results["p-MRR"] == -0.75 + + # test with a positive change + + results = utils.evaluate_change( + new_run, + original_run, + changed_qrels, + ) + + assert results["p-MRR"] == 0.75 diff --git a/testbed/embeddings-benchmark__mteb/tests/test_PairClassificationEvaluator.py b/testbed/embeddings-benchmark__mteb/tests/test_PairClassificationEvaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..0435e99526cff9e9a637cf2222f96be4be4dfa9b --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/test_PairClassificationEvaluator.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +import pytest + +from mteb.evaluation.evaluators import PairClassificationEvaluator + +TOL = 0.0001 + + +class TestPairClassificationEvaluator: + def test_accuracy(self): + scores = [6.12, 5.39, 5.28, 5.94, 6.34, 6.47, 7.88, 6.62, 8.04, 5.9] + labels = [0, 0, 0, 0, 1, 0, 0, 0, 1, 0] + high_score_more_similar = True + acc, acc_threshold = PairClassificationEvaluator.find_best_acc_and_threshold( + scores, labels, high_score_more_similar + ) + assert acc == pytest.approx(0.9, TOL) + assert acc_threshold == pytest.approx(7.95999, TOL) + + def test_f1(self): + scores = [6.12, 5.39, 5.28, 5.94, 6.34, 6.47, 7.88, 6.62, 8.04, 5.9] + labels = [0, 0, 0, 0, 1, 0, 0, 0, 1, 0] + high_score_more_similar = True + + f1, precision, recall, f1_threshold = ( + PairClassificationEvaluator.find_best_f1_and_threshold( + scores, labels, high_score_more_similar + ) + ) + assert f1 == pytest.approx(0.66666, TOL) + assert precision == pytest.approx(1.0, TOL) + assert recall == pytest.approx(0.5, TOL) + assert f1_threshold == pytest.approx(7.95999, TOL) + + def test_ap(self): + scores = [6.12, 5.39, 5.28, 5.94, 6.34, 6.47, 7.88, 6.62, 8.04, 5.9] + labels = [0, 0, 0, 0, 1, 0, 0, 0, 1, 0] + high_score_more_similar = True + ap = PairClassificationEvaluator.ap_score( + scores, labels, high_score_more_similar + ) + assert ap == pytest.approx(0.7, TOL) diff --git a/testbed/embeddings-benchmark__mteb/tests/test_RerankingEvaluator.py b/testbed/embeddings-benchmark__mteb/tests/test_RerankingEvaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..5c0cb205847fbdcb1e32013db477e2a49137ddc9 --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/test_RerankingEvaluator.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +import pytest + +from mteb.evaluation.evaluators import RerankingEvaluator + +TOL = 0.0001 + + +class TestRerankingEvaluator: + def setup_method(self): + """Setup any state tied to the execution of the given method in a class. + + setup_method is invoked for every test method of a class. 
+ """ + self.evaluator = RerankingEvaluator([]) + + def test_mrr_at_k(self): + is_relevant = [1, 1, 1, 0, 0, 0, 0, 0, 0] + pred_ranking = [5, 2, 6, 1, 3, 4, 7, 8, 9] + + assert self.evaluator.mrr_at_k_score( + is_relevant, pred_ranking, 10 + ) == pytest.approx(0.5, TOL) + assert self.evaluator.mrr_at_k_score( + is_relevant, pred_ranking, 3 + ) == pytest.approx(0.5, TOL) + assert self.evaluator.mrr_at_k_score( + is_relevant, pred_ranking, 1 + ) == pytest.approx(0, TOL) + + def test_map(self): + is_relevant = [1, 1, 1, 0, 0] + pred_scores = [0.75, 0.93, 0.85, 0.76, 0.75] + + assert self.evaluator.ap_score(is_relevant, pred_scores) == pytest.approx( + 0.86666, TOL + ) diff --git a/testbed/embeddings-benchmark__mteb/tests/test_RetrievalEvaluator.py b/testbed/embeddings-benchmark__mteb/tests/test_RetrievalEvaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..81b6dc9227b3ff815948759b11c14013cde1a46a --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/test_RetrievalEvaluator.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from mteb.evaluation.evaluators import RetrievalEvaluator + +TOL = 0.0001 + + +class TestRetrievalEvaluator: + def setup_method(self): + """Setup any state tied to the execution of the given method in a class. + + setup_method is invoked for every test method of a class. + """ + self.evaluator = RetrievalEvaluator() + + def test_metrics_at_k(self): + # Qid: {Docid: Relevance} + relevant_docs = { + "0": {"0": 1, "1": 1}, + "1": {"1": 1}, + } + results = { + "0": {"0": 1.0, "1": 0.9, "2": 0.8}, + "1": {"0": 0.0, "1": 1.0, "2": 0.0}, + } + + ndcg, _map, recall, precision = self.evaluator.evaluate( + relevant_docs, + results, + [1, 2, 3], + ) + + assert ndcg == {"NDCG@1": 0.5, "NDCG@2": 0.30657, "NDCG@3": 0.30657} + assert _map == {"MAP@1": 0.25, "MAP@2": 0.25, "MAP@3": 0.25} + assert recall == {"Recall@1": 0.25, "Recall@2": 0.25, "Recall@3": 0.25} + assert precision == {"P@1": 0.5, "P@2": 0.25, "P@3": 0.16667} diff --git a/testbed/embeddings-benchmark__mteb/tests/test_TaskMetadata.py b/testbed/embeddings-benchmark__mteb/tests/test_TaskMetadata.py new file mode 100644 index 0000000000000000000000000000000000000000..ec514c5a7af3981e5f4694043f161179591024fa --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/test_TaskMetadata.py @@ -0,0 +1,406 @@ +import logging + +import pytest + +from mteb.abstasks.TaskMetadata import TaskMetadata +from mteb.overview import get_tasks + +# Historic datasets without filled metadata. Do NOT add new datasets to this list. 
+_HISTORIC_DATASETS = [ + "AmazonReviewsClassification", + "MasakhaNEWSClassification", + "MassiveIntentClassification", + "MassiveScenarioClassification", + "MTOPDomainClassification", + "MTOPIntentClassification", + "NordicLangClassification", + "ScalaClassification", + "NoRecClassification", + "NorwegianParliamentClassification", + "PunjabiNewsClassification", + "CBD", + "PolEmo2.0-IN", + "PolEmo2.0-OUT", + "AllegroReviews", + "PAC", + "SweRecClassification", + "TNews", + "IFlyTek", + "MultilingualSentiment", + "JDReview", + "OnlineShopping", + "Waimai", + "BlurbsClusteringP2P", + "BlurbsClusteringS2S", + "TenKGnadClusteringP2P", + "TenKGnadClusteringS2S", + "ArxivClusteringP2P", + "ArxivClusteringS2S", + "BigPatentClustering", + "BiorxivClusteringP2P", + "BiorxivClusteringS2S", + "MedrxivClusteringP2P", + "MedrxivClusteringS2S", + "RedditClustering", + "RedditClusteringP2P", + "StackExchangeClustering", + "StackExchangeClusteringP2P", + "TwentyNewsgroupsClustering", + "WikiCitiesClustering", + "AlloProfClusteringP2P", + "AlloProfClusteringS2S", + "HALClusteringS2S", + "MLSUMClusteringP2P", + "MLSUMClusteringS2S", + "MasakhaNEWSClusteringP2P", + "MasakhaNEWSClusteringS2S", + "SNLClustering", + "VGClustering", + "EightTagsClustering", + "RomaniBibleClustering", + "FloresClusteringS2S", + "SpanishNewsClusteringP2P", + "SwednClustering", + "CLSClusteringS2S", + "CLSClusteringP2P", + "ThuNewsClusteringS2S", + "ThuNewsClusteringP2P", + "TV2Nordretrieval", + "TwitterHjerneRetrieval", + "GerDaLIR", + "GerDaLIRSmall", + "GermanDPR", + "GermanQuAD-Retrieval", + "LegalQuAD", + "AILACasedocs", + "AILAStatutes", + "ArguAna", + "ClimateFEVER", + "CQADupstackAndroidRetrieval", + "CQADupstackEnglishRetrieval", + "CQADupstackGamingRetrieval", + "CQADupstackGisRetrieval", + "CQADupstackMathematicaRetrieval", + "CQADupstackPhysicsRetrieval", + "CQADupstackProgrammersRetrieval", + "CQADupstackStatsRetrieval", + "CQADupstackTexRetrieval", + "CQADupstackUnixRetrieval", + "CQADupstackWebmastersRetrieval", + "CQADupstackWordpressRetrieval", + "DBPedia", + "FEVER", + "FiQA2018", + "HagridRetrieval", + "HotpotQA", + "LegalBenchConsumerContractsQA", + "LegalBenchCorporateLobbying", + "LegalSummarization", + "LEMBNeedleRetrieval", + "LEMBPasskeyRetrieval", + "MSMARCO", + "MSMARCOv2", + "NarrativeQARetrieval", + "NFCorpus", + "NQ", + "QuoraRetrieval", + "SCIDOCS", + "SciFact", + "Touche2020", + "TRECCOVID", + "AlloprofRetrieval", + "BSARDRetrieval", + "SyntecRetrieval", + "JaQuADRetrieval", + "Ko-miracl", + "Ko-StrategyQA", + "MintakaRetrieval", + "MIRACLRetrieval", + "MultiLongDocRetrieval", + "XMarket", + "SNLRetrieval", + "ArguAna-PL", + "DBPedia-PL", + "FiQA-PL", + "HotpotQA-PL", + "MSMARCO-PL", + "NFCorpus-PL", + "NQ-PL", + "Quora-PL", + "SCIDOCS-PL", + "SciFact-PL", + "TRECCOVID-PL", + "SpanishPassageRetrievalS2P", + "SpanishPassageRetrievalS2S", + "SweFaqRetrieval", + "T2Retrieval", + "MMarcoRetrieval", + "DuRetrieval", + "CovidRetrieval", + "CmedqaRetrieval", + "EcomRetrieval", + "MedicalRetrieval", + "VideoRetrieval", + "LeCaRDv2", + "SprintDuplicateQuestions", + "TwitterSemEval2015", + "TwitterURLCorpus", + "OpusparcusPC", + "PawsX", + "SICK-E-PL", + "PpcPC", + "CDSC-E", + "PSC", + "Ocnli", + "Cmnli", + "AskUbuntuDupQuestions", + "MindSmallReranking", + "SciDocsRR", + "StackOverflowDupQuestions", + "AlloprofReranking", + "SyntecReranking", + "MIRACLReranking", + "T2Reranking", + "MMarcoReranking", + "CMedQAv1-reranking", + "CMedQAv2-reranking", + "GermanSTSBenchmark", + "BIOSSES", + "SICK-R", + "STS12", 
+ "STS13", + "STS14", + "STS15", + "STS16", + "STSBenchmark", + "FinParaSTS", + "SICKFr", + "KLUE-STS", + "KorSTS", + "STS17", + "STS22", + "STSBenchmarkMultilingualSTS", + "SICK-R-PL", + "CDSC-R", + "RonSTS", + "STSES", + "ATEC", + "BQ", + "LCQMC", + "PAWSX", + "STSB", + "AFQMC", + "QBQTC", + "SummEval", + "SummEvalFr", + "ArxivClusteringP2P.v2", + "SwednClusteringP2P", + "SwednClusteringS2S", + "MalayalamNewsClassification", + "TamilNewsClassification", + "ArxivClusteringP2P.v3", + "TenKGnadClusteringP2P.v2", + "TenKGnadClusteringS2S.v2", +] + + +def test_given_dataset_config_then_it_is_valid(): + my_task = TaskMetadata( + name="MyTask", + dataset={ + "path": "test/dataset", + "revision": "1.0", + }, + description="testing", + reference=None, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="map", + date=None, + form=None, + domains=None, + license=None, + task_subtypes=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation="", + avg_character_length=None, + n_samples=None, + ) + assert my_task.dataset["path"] == "test/dataset" + assert my_task.dataset["revision"] == "1.0" + + +def test_given_missing_dataset_path_then_it_throws(): + with pytest.raises(ValueError): + TaskMetadata( + name="MyTask", + description="testing", + reference=None, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="map", + date=None, + form=None, + domains=None, + license=None, + task_subtypes=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation="", + avg_character_length=None, + n_samples=None, + ) + + +def test_given_missing_revision_path_then_it_throws(): + with pytest.raises(ValueError): + TaskMetadata( + name="MyTask", + dataset={ + "path": "test/dataset", + }, + description="testing", + reference=None, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="map", + date=None, + form=None, + domains=None, + license=None, + task_subtypes=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation="", + avg_character_length=None, + n_samples=None, + ) + + +def test_given_none_revision_path_then_it_logs_warning(caplog): + with caplog.at_level(logging.WARNING): + my_task = TaskMetadata( + name="MyTask", + dataset={"path": "test/dataset", "revision": None}, + description="testing", + reference=None, + type="Classification", + category="s2s", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="map", + date=None, + form=None, + domains=None, + license=None, + task_subtypes=None, + socioeconomic_status=None, + annotations_creators=None, + dialect=None, + text_creation=None, + bibtex_citation="", + avg_character_length=None, + n_samples=None, + ) + + assert my_task.dataset["revision"] is None + + warning_logs = [ + record for record in caplog.records if record.levelname == "WARNING" + ] + assert len(warning_logs) == 1 + assert ( + warning_logs[0].message == "Revision missing for the dataset test/dataset. " + "It is encourage to specify a dataset revision for reproducability." 
+    )
+
+
+def test_unfilled_metadata_is_not_filled():
+    assert (
+        TaskMetadata(
+            name="MyTask",
+            dataset={
+                "path": "test/dataset",
+                "revision": "1.0",
+            },
+            description="testing",
+            reference=None,
+            type="Classification",
+            category="s2s",
+            eval_splits=["test"],
+            eval_langs=["eng-Latn"],
+            main_score="map",
+            date=None,
+            form=None,
+            domains=None,
+            license=None,
+            task_subtypes=None,
+            socioeconomic_status=None,
+            annotations_creators=None,
+            dialect=None,
+            text_creation=None,
+            bibtex_citation="",
+            avg_character_length=None,
+            n_samples=None,
+        ).is_filled()
+        is False
+    )
+
+
+def test_filled_metadata_is_filled():
+    assert (
+        TaskMetadata(
+            name="MyTask",
+            dataset={
+                "path": "test/dataset",
+                "revision": "1.0",
+            },
+            description="testing",
+            reference="https://aclanthology.org/W19-6138/",
+            type="Classification",
+            category="s2s",
+            eval_splits=["test"],
+            eval_langs=["eng-Latn"],
+            main_score="map",
+            date=("2021-01-01", "2021-12-31"),
+            form=["written"],
+            domains=["Non-fiction"],
+            license="mit",
+            task_subtypes=["Thematic clustering"],
+            socioeconomic_status="high",
+            annotations_creators="expert-annotated",
+            dialect=[],
+            text_creation="found",
+            bibtex_citation="Someone et al",
+            avg_character_length={"train": 1},
+            n_samples={"train": 1},
+        ).is_filled()
+        is True
+    )
+
+
+def test_all_metadata_is_filled():
+    all_tasks = get_tasks()
+
+    unfilled_metadata = []
+    for task in all_tasks:
+        if task.metadata.name not in _HISTORIC_DATASETS:
+            if not task.metadata.is_filled():
+                unfilled_metadata.append(task.metadata.name)
+    if unfilled_metadata:
+        raise ValueError(
+            f"The metadata of the following datasets is not filled: {unfilled_metadata}"
+        )
diff --git a/testbed/embeddings-benchmark__mteb/tests/test_all_abstasks.py b/testbed/embeddings-benchmark__mteb/tests/test_all_abstasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9549a01a1de2e43333b4a061bb1361dd9492145
--- /dev/null
+++ b/testbed/embeddings-benchmark__mteb/tests/test_all_abstasks.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import asyncio
+import logging
+from unittest.mock import Mock, patch
+
+import aiohttp
+import pytest
+
+import mteb
+from mteb import MTEB
+from mteb.abstasks import AbsTask
+from mteb.abstasks.AbsTaskInstructionRetrieval import AbsTaskInstructionRetrieval
+from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval
+from mteb.abstasks.MultiSubsetLoader import MultiSubsetLoader
+from mteb.overview import TASKS_REGISTRY
+
+logging.basicConfig(level=logging.INFO)
+
+tasks = MTEB().tasks_cls
+
+
+@pytest.mark.parametrize("task", tasks)
+@patch("datasets.load_dataset")
+@patch("datasets.concatenate_datasets")
+def test_load_data(
+    mock_concatenate_datasets: Mock, mock_load_dataset: Mock, task: AbsTask
+):
+    # TODO: Retrieval, instruction-retrieval and multi-subset tasks override
+    # load_data entirely, so the mocked pipeline below does not apply to them;
+    # they should eventually get a dedicated test.
+    if (
+        isinstance(task, AbsTaskRetrieval)
+        or isinstance(task, AbsTaskInstructionRetrieval)
+        or isinstance(task, MultiSubsetLoader)
+    ):
+        pytest.skip()
+    with patch.object(task, "dataset_transform") as mock_dataset_transform:
+        task.load_data()
+        mock_load_dataset.assert_called()
+
+        # Multilingual and crosslingual tasks do not (yet) call dataset_transform
+        # once per load; arguably they should, so they can be extended more easily.
+        if not task.is_crosslingual and not task.is_multilingual:
+            mock_dataset_transform.assert_called_once()
+
+
+async def check_dataset_on_hf(
+    session: aiohttp.ClientSession, dataset: str, revision: str
+) -> bool:
+    url = f"https://huggingface.co/datasets/{dataset}/tree/{revision}"
+    async with session.head(url) as response:
+        return response.status == 200
+
+
+async def check_datasets_are_available_on_hf(tasks):
+    does_not_exist = []
+    async with aiohttp.ClientSession() as session:
+        tasks_checks = [
+            check_dataset_on_hf(
+                session,
+                task.metadata.dataset["path"],
+                task.metadata.dataset["revision"],
+            )
+            for task in tasks
+        ]
+        datasets_exists = await asyncio.gather(*tasks_checks)
+
+    for task, ds_exists in zip(tasks, datasets_exists):
+        if not ds_exists:
+            does_not_exist.append(
+                (task.metadata.dataset["path"], task.metadata.dataset["revision"])
+            )
+
+    if does_not_exist:
+        pretty_print = "\n".join(
+            [f"{ds[0]} - revision {ds[1]}" for ds in does_not_exist]
+        )
+        assert False, f"Datasets not available on Hugging Face:\n{pretty_print}"
+
+
+def test_dataset_availability():
+    """Checks if the datasets are available on Hugging Face using both their name and revision."""
+    tasks = MTEB().tasks_cls
+    asyncio.run(check_datasets_are_available_on_hf(tasks))
+
+
+def test_superseeded_dataset_exists():
+    tasks = mteb.get_tasks(exclude_superseeded=False)
+    for task in tasks:
+        if task.superseeded_by:
+            assert (
+                task.superseeded_by in TASKS_REGISTRY
+            ), f"{task} is superseded by {task.superseeded_by} but {task.superseeded_by} is not in the TASKS_REGISTRY"
diff --git a/testbed/embeddings-benchmark__mteb/tests/test_cli.py b/testbed/embeddings-benchmark__mteb/tests/test_cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..51c638b401d80f381a3a91e8dfa633f6a25b9272
--- /dev/null
+++ b/testbed/embeddings-benchmark__mteb/tests/test_cli.py
@@ -0,0 +1,32 @@
+"""Tests for the MTEB CLI"""
+
+import subprocess
+from pathlib import Path
+
+
+def test_available_tasks():
+    command = "mteb --available_tasks"
+    result = subprocess.run(command, shell=True, capture_output=True, text=True)
+    assert result.returncode == 0, "Command failed"
+    assert (
+        "Banking77Classification" in result.stdout
+    ), "Sample task Banking77Classification not found in available tasks"
+
+
+def test_run_task(
+    model_name: str = "average_word_embeddings_komninos",
+    task_name="BornholmBitextMining",
+):
+    command = f"mteb -m {model_name} -t {task_name} --verbosity 3 --output_folder tests/results/test_model"
+    result = subprocess.run(command, shell=True, capture_output=True, text=True)
+    assert result.returncode == 0, "Command failed"
+
+    path = Path("tests/results/test_model")
+    assert path.exists(), "Output folder not created"
+    json_files = list(path.glob("*.json"))
+    assert "model_meta.json" in [
+        f.name for f in json_files
+    ], "model_meta.json not found in output folder"
+    assert f"{task_name}.json" in [
+        f.name for f in json_files
+    ], f"{task_name} not found in output folder"
diff --git a/testbed/embeddings-benchmark__mteb/tests/test_encoder_interfaces.py b/testbed/embeddings-benchmark__mteb/tests/test_encoder_interfaces.py
new file mode 100644
index 0000000000000000000000000000000000000000..42a308435ff160fbfd4033c7ebb21d34aa94dec3
--- /dev/null
+++ b/testbed/embeddings-benchmark__mteb/tests/test_encoder_interfaces.py
@@ -0,0 +1,17 @@
+from sentence_transformers import SentenceTransformer
+
+from mteb.encoder_interface import Encoder, EncoderWithQueryCorpusEncode
+from mteb.evaluation.evaluators.RetrievalEvaluator import DRESModel
+
+
+def test_sentence_is_encoder():
+    model = SentenceTransformer("average_word_embeddings_komninos")
+    assert isinstance(model, Encoder)
+
+
+def test_wrapped_sentence_is_encoder_with_query_corpus_encode():
+    model = SentenceTransformer("average_word_embeddings_komninos")
+    model = DRESModel(model)
+
+    assert isinstance(model, Encoder)
+    assert isinstance(model, EncoderWithQueryCorpusEncode)
diff --git a/testbed/embeddings-benchmark__mteb/tests/test_langscripts.py b/testbed/embeddings-benchmark__mteb/tests/test_langscripts.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4e4ab9482552836e3e3a1e9de2ce2d60f488c3c
--- /dev/null
+++ b/testbed/embeddings-benchmark__mteb/tests/test_langscripts.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+import pytest
+from attr import dataclass
+
+from mteb.languages import LanguageScripts
+
+
+@dataclass
+class LangScriptTestCase:
+    args: dict
+    contains_language: list[str]
+    not_contains_language: list[str]
+    contains_script: list[str]
+    not_contains_script: list[str]
+
+
+test_cases = [
+    LangScriptTestCase(
+        args=dict(languages=["fra"], scripts=None),
+        contains_language=["fra", "fra-Latn"],
+        not_contains_language=["eng"],
+        contains_script=[],
+        not_contains_script=["Latn"],
+    ),
+    LangScriptTestCase(
+        args=dict(languages=["fra", "eng"], scripts=["Latn"]),
+        contains_language=["fra", "fra-Latn", "eng", "eng-Latn"],
+        not_contains_language=["deu"],
+        contains_script=["Latn"],
+        not_contains_script=["Cyrl"],
+    ),
+    LangScriptTestCase(
+        args=dict(languages=["fra-Latn"]),
+        contains_language=["fra", "fra-Latn"],
+        not_contains_language=["eng", "eng-Latn"],
+        contains_script=["Latn"],
+        not_contains_script=["Cyrl"],
+    ),
+]
+
+
+@pytest.mark.parametrize("test_case", test_cases)
+def test_langscripts(test_case: LangScriptTestCase):
+    langscripts = LanguageScripts.from_languages_and_scripts(**test_case.args)
+
+    for lang in test_case.contains_language:
+        assert langscripts.contains_language(lang)
+
+    for lang in test_case.not_contains_language:
+        assert not langscripts.contains_language(lang)
+
+    for script in test_case.contains_script:
+        assert langscripts.contains_script(script)
+
+    for script in test_case.not_contains_script:
+        assert not langscripts.contains_script(script)
diff --git a/testbed/embeddings-benchmark__mteb/tests/test_mteb.py b/testbed/embeddings-benchmark__mteb/tests/test_mteb.py
new file mode 100644
index 0000000000000000000000000000000000000000..3db69627304f9a6c93c224244d97db28d539332f
--- /dev/null
+++ b/testbed/embeddings-benchmark__mteb/tests/test_mteb.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import logging
+from typing import Union
+
+import pytest
+from sentence_transformers import SentenceTransformer
+
+from mteb import MTEB
+from mteb.abstasks import AbsTask
+from mteb.tasks.BitextMining.dan.BornholmskBitextMining import BornholmBitextMining
+
+logging.basicConfig(level=logging.INFO)
+
+
+def test_two_mteb_tasks():
+    """Test that two tasks can be fetched and run"""
+    model = SentenceTransformer("average_word_embeddings_komninos")
+    eval = MTEB(
+        tasks=[
+            "STS12",
+            "SummEval",
+        ]
+    )
+    eval.run(model, output_folder="tests/results", overwrite_results=True)
+
+
+@pytest.mark.parametrize(
+    "task",
+    [
+        BornholmBitextMining(),
+        "TwentyNewsgroupsClustering",
+        "TwentyNewsgroupsClustering.v2",
+        "Banking77Classification",
+        "SciDocsRR",
+        "SprintDuplicateQuestions",
+        "NFCorpus",
+        "MalteseNewsClassification",
"STS12", + "SummEval", + ], +) +@pytest.mark.parametrize( + "model_name", + [ + "average_word_embeddings_levy_dependency", + ], +) +def test_mteb_task(task: Union[str, AbsTask], model_name: str): + """Test that a task can be fetched and run""" + model = SentenceTransformer(model_name) + eval = MTEB(tasks=[task]) + eval.run(model, output_folder="tests/results", overwrite_results=True) + + +def test_all_tasks_fetch(): + """Test that all tasks can be fetched""" + MTEB.mteb_tasks() diff --git a/testbed/embeddings-benchmark__mteb/tests/test_mteb_rerank.py b/testbed/embeddings-benchmark__mteb/tests/test_mteb_rerank.py new file mode 100644 index 0000000000000000000000000000000000000000..0c1af18dabb791b598d4b72b334adc7e3d7be88f --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/test_mteb_rerank.py @@ -0,0 +1,392 @@ +from __future__ import annotations + +import json +import logging +import os + +from sentence_transformers import CrossEncoder, SentenceTransformer + +from mteb import MTEB + +logging.basicConfig(level=logging.INFO) + + +def test_mteb_rerank(): + # Test that reranking works + # unfortunately, we need all the query ids to pretend to have this + scifact_keys = [ + "1", + "3", + "5", + "13", + "36", + "42", + "48", + "49", + "50", + "51", + "53", + "54", + "56", + "57", + "70", + "72", + "75", + "94", + "99", + "100", + "113", + "115", + "118", + "124", + "127", + "128", + "129", + "130", + "132", + "133", + "137", + "141", + "142", + "143", + "146", + "148", + "163", + "171", + "179", + "180", + "183", + "185", + "198", + "208", + "212", + "213", + "216", + "217", + "218", + "219", + "230", + "232", + "233", + "236", + "237", + "238", + "239", + "248", + "249", + "261", + "268", + "269", + "274", + "275", + "279", + "294", + "295", + "298", + "300", + "303", + "312", + "314", + "324", + "327", + "338", + "343", + "350", + "354", + "362", + "380", + "384", + "385", + "386", + "388", + "399", + "410", + "411", + "415", + "421", + "431", + "436", + "437", + "439", + "440", + "443", + "452", + "475", + "478", + "491", + "501", + "502", + "507", + "508", + "513", + "514", + "516", + "517", + "521", + "525", + "527", + "528", + "532", + "533", + "535", + "536", + "539", + "540", + "544", + "549", + "551", + "552", + "554", + "560", + "569", + "575", + "577", + "578", + "587", + "589", + "593", + "597", + "598", + "613", + "619", + "623", + "628", + "636", + "637", + "641", + "644", + "649", + "659", + "660", + "674", + "684", + "690", + "691", + "692", + "693", + "700", + "702", + "715", + "716", + "718", + "721", + "723", + "727", + "728", + "729", + "742", + "743", + "744", + "756", + "759", + "768", + "770", + "775", + "781", + "783", + "784", + "785", + "793", + "800", + "805", + "808", + "811", + "814", + "820", + "821", + "823", + "830", + "831", + "832", + "834", + "837", + "839", + "845", + "847", + "852", + "859", + "870", + "873", + "879", + "880", + "882", + "887", + "903", + "904", + "907", + "911", + "913", + "914", + "921", + "922", + "936", + "956", + "957", + "960", + "967", + "971", + "975", + "982", + "985", + "993", + "1012", + "1014", + "1019", + "1020", + "1021", + "1024", + "1029", + "1041", + "1049", + "1062", + "1086", + "1088", + "1089", + "1099", + "1100", + "1104", + "1107", + "1110", + "1121", + "1130", + "1132", + "1137", + "1140", + "1144", + "1146", + "1150", + "1163", + "1175", + "1179", + "1180", + "1185", + "1187", + "1191", + "1194", + "1196", + "1197", + "1199", + "1200", + "1202", + "1204", + "1207", + "1213", + "1216", + "1221", + "1225", + 
"1226", + "1232", + "1241", + "1245", + "1259", + "1262", + "1266", + "1270", + "1271", + "1272", + "1273", + "1274", + "1278", + "1279", + "1280", + "1281", + "1282", + "1290", + "1292", + "1298", + "1303", + "1316", + "1319", + "1320", + "1332", + "1335", + "1336", + "1337", + "1339", + "1344", + "1352", + "1359", + "1362", + "1363", + "1368", + "1370", + "1379", + "1382", + "1385", + "1389", + "1395", + ] + model = CrossEncoder("cross-encoder/ms-marco-TinyBERT-L-2-v2") + eval = MTEB( + tasks=[ + "SciFact", + ] + ) + # create fake first stage results + with open("tmp.json", "w") as f: + f.write( + json.dumps( + { + i: { + # just two random documents so we can see it works + "4983": 0.1, + "18670": 0.9, + "19238": 0.01, + } + for i in scifact_keys + } + ) + ) + eval.run( + model, + output_folder="tests/results", + overwrite_results=True, + eval_splits=["test"], + top_k=2, + previous_results="tmp.json", + save_predictions=True, + ) + os.remove("tmp.json") + + # read in the results + with open("tests/results/SciFact_default_predictions.json") as f: + results = json.load(f) + + # check that only the top two results are re-orderd + assert "19238" not in results["1"] + assert "4983" in results["1"] + assert "18670" in results["1"] + + +def test_reranker_same_ndcg1(): + de = SentenceTransformer("average_word_embeddings_komninos") + ce = CrossEncoder("cross-encoder/ms-marco-TinyBERT-L-2-v2") + eval = MTEB(tasks=["SciFact"]) + eval.run( + de, + output_folder="tests/results/stage1", + overwrite_results=True, + save_predictions=True, + eval_splits=["test"], + ) + eval.run( + ce, + output_folder="tests/results/stage2", + overwrite_results=True, + previous_results="tests/results/stage1/SciFact_default_predictions.json", + save_predictions=False, + eval_splits=["test"], + top_k=1, # don't allow it to rerank more than 1 so we can check for top_1 being the same + ) + + # read in stage 1 and stage two and check ndcg@1 is the same + with open("tests/results/stage1/SciFact.json") as f: + stage1 = json.load(f) + + with open("tests/results/stage2/SciFact.json") as f: + stage2 = json.load(f) + + assert ( + stage1["scores"]["test"][0]["ndcg_at_1"] + == stage2["scores"]["test"][0]["ndcg_at_1"] + ) diff --git a/testbed/embeddings-benchmark__mteb/tests/test_mteb_results.py b/testbed/embeddings-benchmark__mteb/tests/test_mteb_results.py new file mode 100644 index 0000000000000000000000000000000000000000..466326ae9ce6d7db1d46bf6430c83584263ac05d --- /dev/null +++ b/testbed/embeddings-benchmark__mteb/tests/test_mteb_results.py @@ -0,0 +1,100 @@ +from importlib.metadata import version +from pathlib import Path + +import pytest + +import mteb +from mteb import AbsTask +from mteb.MTEBResults import MTEBResults + +tests_folder = Path(__file__).parent + + +class DummyTask(AbsTask): + superseeded_by = "newer_task" + metadata = mteb.TaskMetadata( + name="dummy_task", + description="dummy task for testing", + dataset={"revision": "1.0", "path": "dummy_dataset"}, + type="Classification", + category="p2p", + eval_langs={ + "en-de": ["eng-Latn", "deu-Latn"], + "en-fr": ["eng-Latn", "fra-Latn"], + }, + main_score="main_score", + eval_splits=["train"], + domains=[], + text_creation="created", + reference="https://www.noreference.com", + date=("2024-05-02", "2024-05-03"), + form=[], + task_subtypes=[], + license="mit", + socioeconomic_status="mixed", + annotations_creators="derived", + dialect=[], + bibtex_citation="", + avg_character_length={}, + n_samples={}, + ) + + def evaluate(self, model, split: str = "test"): + pass + + 
+    def _evaluate_subset(self, **kwargs):
+        pass
+
+
+def test_mteb_results():
+    """Test MTEBResults class (this is the same as the example in the docstring)"""
+    scores = {
+        "train": {
+            "en-de": {
+                "main_score": 0.5,
+            },
+            "en-fr": {
+                "main_score": 0.6,
+            },
+        },
+    }
+
+    evaluation_time = 100
+
+    mteb_results = MTEBResults.from_task_results(
+        task=DummyTask(), scores=scores, evaluation_time=evaluation_time
+    )
+
+    assert mteb_results.get_score() == 0.55
+    assert mteb_results.get_score(languages=["eng"]) == 0.55
+    assert mteb_results.get_score(languages=["fra"]) == 0.6
+    dict_repr = {
+        "dataset_revision": "1.0",
+        "task_name": "dummy_task",
+        "mteb_version": version("mteb"),
+        "evaluation_time": 100,
+        "kg_co2_emissions": None,
+        "scores": {
+            "train": [
+                {
+                    "main_score": 0.5,
+                    "hf_subset": "en-de",
+                    "languages": ["eng-Latn", "deu-Latn"],
+                },
+                {
+                    "main_score": 0.6,
+                    "hf_subset": "en-fr",
+                    "languages": ["eng-Latn", "fra-Latn"],
+                },
+            ]
+        },
+    }
+    assert mteb_results.to_dict() == dict_repr
+
+
+@pytest.mark.parametrize(
+    "path", list((tests_folder / "historic_results").glob("*.json"))
+)
+def test_mteb_results_from_historic(path: Path):
+    mteb_result = MTEBResults.from_disk(path, load_historic_data=True)
+    assert isinstance(mteb_result, MTEBResults)
diff --git a/testbed/embeddings-benchmark__mteb/tests/test_overview.py b/testbed/embeddings-benchmark__mteb/tests/test_overview.py
new file mode 100644
index 0000000000000000000000000000000000000000..3897f06f0e523215a28f6ca3faabfdab0eb6ed09
--- /dev/null
+++ b/testbed/embeddings-benchmark__mteb/tests/test_overview.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+import pytest
+
+import mteb
+from mteb import get_tasks
+from mteb.abstasks.TaskMetadata import TASK_DOMAIN, TASK_TYPE
+from mteb.overview import MTEBTasks
+
+
+def test_get_tasks_size_differences():
+    assert len(get_tasks()) > 0
+    assert len(get_tasks()) >= len(get_tasks(languages=["eng"]))
+    assert len(get_tasks()) >= len(get_tasks(script=["Latn"]))
+    assert len(get_tasks()) >= len(get_tasks(domains=["Legal"]))
+    assert len(get_tasks()) >= len(get_tasks(languages=["eng", "deu"]))
+    assert len(get_tasks(languages=["eng", "deu"])) >= len(
+        get_tasks(languages=["eng"])
+    )
+
+
+@pytest.mark.parametrize("languages", [["eng", "deu"], ["eng"], None])
+@pytest.mark.parametrize("script", [["Latn"], ["Cyrl"], None])
+@pytest.mark.parametrize("domains", [["Legal"], ["Medical", "Non-fiction"], None])
+@pytest.mark.parametrize("task_types", [["Classification"], ["Clustering"], None])
+@pytest.mark.parametrize("exclude_superseeded_datasets", [True, False])
+def test_get_task(
+    languages: list[str],
+    script: list[str],
+    domains: list[TASK_DOMAIN],
+    task_types: list[TASK_TYPE] | None,
+    exclude_superseeded_datasets: bool,
+):
+    tasks = mteb.get_tasks(
+        languages=languages,
+        script=script,
+        domains=domains,
+        task_types=task_types,
+        exclude_superseeded=exclude_superseeded_datasets,
+    )
+
+    for task in tasks:
+        if languages:
+            assert set(languages).intersection(task.metadata.languages)
+        if script:
+            assert set(script).intersection(task.metadata.scripts)
+        if domains:
+            task_domains = (
+                set(task.metadata.domains) if task.metadata.domains else set()
+            )
+            assert set(domains).intersection(set(task_domains))
+        if task_types:
+            assert task.metadata.type in task_types
+        if exclude_superseeded_datasets:
+            assert task.superseeded_by is None
+
+
+def test_get_tasks_filtering():
+    """Tests that get_tasks filters tasks for languages within the task, i.e.
+    that a multilingual task returns only relevant subtasks for the
+    specified languages
+    """
+    tasks = get_tasks(languages=["eng"])
+
+    for task in tasks:
+        if task.is_multilingual:
+            assert isinstance(task.metadata.eval_langs, dict)
+
+            for hf_subset in task.hf_subsets:
+                assert "eng-Latn" in task.metadata.eval_langs[hf_subset]
+
+
+@pytest.mark.parametrize("script", [["Latn"], ["Cyrl"], None])
+@pytest.mark.parametrize("task_types", [["Classification"], ["Clustering"], None])
+def test_MTEBTasks(
+    script: list[str],
+    task_types: list[TASK_TYPE] | None,
+):
+    tasks = mteb.get_tasks(script=script, task_types=task_types)
+    assert isinstance(tasks, MTEBTasks)
+    langs = tasks.languages
+    for t in tasks:
+        assert len(langs.intersection(t.languages)) > 0
+
+    # the markdown table has one row per task; the offset of 3 accounts for the
+    # table header and trailing lines
+    n_tasks = len(tasks)
+    assert len(tasks.to_markdown().split("\n")) - 3 == n_tasks
diff --git a/testbed/embeddings-benchmark__mteb/tests/test_reproducible_workflow.py b/testbed/embeddings-benchmark__mteb/tests/test_reproducible_workflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddc3dace5769626b0d2abf7c6472a42109ad9d8b
--- /dev/null
+++ b/testbed/embeddings-benchmark__mteb/tests/test_reproducible_workflow.py
@@ -0,0 +1,29 @@
+from __future__ import annotations
+
+import logging
+
+import pytest
+
+import mteb
+from mteb import MTEB
+from mteb.encoder_interface import Encoder, EncoderWithQueryCorpusEncode
+from mteb.model_meta import ModelMeta
+
+logging.basicConfig(level=logging.INFO)
+
+
+@pytest.mark.parametrize("task_name", ["BornholmBitextMining"])
+@pytest.mark.parametrize("model_name", ["sentence-transformers/all-MiniLM-L6-v2"])
+def test_reproducibility_workflow(task_name: str, model_name: str):
+    """Test that a model and a task can be fetched and run in a reproducible fashion."""
+    model_meta = mteb.get_model_meta(model_name)
+    task = mteb.get_task(task_name)
+
+    assert isinstance(model_meta, ModelMeta)
+    assert isinstance(task, mteb.AbsTask)
+
+    model = mteb.get_model(model_name)
+    assert isinstance(model, (Encoder, EncoderWithQueryCorpusEncode))
+
+    eval = MTEB(tasks=[task])
+    eval.run(model, output_folder="tests/results", overwrite_results=True)
diff --git a/testbed/embeddings-benchmark__mteb/tests/test_retrieval_abstask.py b/testbed/embeddings-benchmark__mteb/tests/test_retrieval_abstask.py
new file mode 100644
index 0000000000000000000000000000000000000000..af787fa16ee0045f7fe610305d68ea8185d13c85
--- /dev/null
+++ b/testbed/embeddings-benchmark__mteb/tests/test_retrieval_abstask.py
@@ -0,0 +1,9 @@
+import pytest
+
+from mteb.abstasks import AbsTaskRetrieval
+from mteb.tasks.Retrieval.eng.NFCorpusRetrieval import NFCorpus
+
+
+@pytest.mark.parametrize("task", [NFCorpus()])
+def test_abstask_calculate_metadata_metrics(task: AbsTaskRetrieval):
+    task.calculate_metadata_metrics()
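+
+
+# A minimal sketch, not part of the original suite: calculate_metadata_metrics
+# above loads the full dataset, so a cheap metadata sanity check (expected field
+# values taken from NFCorpus's TaskMetadata) can catch a misconfigured task
+# before the slow run starts.
+def test_retrieval_task_metadata_sanity():
+    task = NFCorpus()
+    assert task.metadata.type == "Retrieval"
+    assert "test" in task.metadata.eval_splits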