Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See raw diff.
- testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/NorwegianCourtsBitextMining.py +45 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/norwegian_courts_bitext_mining.py +36 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/srn/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/VieMedEVBitextMining.py +78 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/__init__.py +133 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/AJGT.py +42 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/HotelReviewSentimentClassification.py +49 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/TweetSarcasmClassification.py +60 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliDocumentClassification.py +56 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliHateSpeechClassification.py +45 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliSentimentAnalysis.py +44 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/BulgarianStoreReviewSentimentClassfication.py +54 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CSFDCZMovieReviewSentimentClassification.py +59 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechProductReviewSentimentClassification.py +65 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSoMeSentimentClassification.py +62 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSubjectivityClassification.py +45 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/AngryTweetsClassification.py +46 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DKHateClassification.py +76 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DanishPoliticalCommentsClassification.py +50 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DdiscoCohesionClassification.py +67 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/LccSentimentClassification.py +59 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/GermanPoliticiansTwitterSentimentClassification.py +58 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/TenKGnadClassification.py +45 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ell/GreekLegalCodeClassification.py +57 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/AmazonPolarityClassification.py +43 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ArxivClassification.py +43 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/Banking77Classification.py +60 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/DBpediaClassification.py +52 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/EmotionClassification.py +64 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FinancialPhrasebankClassification.py +46 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FrenkEnClassification.py +41 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ImdbClassification.py +55 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/LegalBenchClassification.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/NewsClassification.py +37 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/PatentClassification.py +55 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicChatClassification.py +67 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicConversationsClassification.py +55 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetSentimentExtractionClassification.py +50 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetTopicSingleClassification.py +56 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fas/__init__.py +0 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoHateSpeechClassification.py +50 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoShopeeReviewsClassification.py +44 -0
- testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/por/__init__.py +0 -0
testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/NorwegianCourtsBitextMining.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from __future__ import annotations

from mteb.abstasks import AbsTaskBitextMining
from mteb.abstasks.TaskMetadata import TaskMetadata


class NorwegianCourtsBitextMining(AbsTaskBitextMining):
    """Bitext mining between Norwegian Bokmål (nb) and Nynorsk (nn).

    Parallel sentences come from Norwegian court proceedings; the task is to
    match each Bokmål sentence with its Nynorsk counterpart.
    """

    metadata = TaskMetadata(
        name="NorwegianCourtsBitextMining",
        dataset={
            "path": "kardosdrur/norwegian-courts",
            "revision": "d79af07e969a6678fcbbe819956840425816468f",
        },
        description="Nynorsk and Bokmål parallel corpus from Norwegian courts. Norwegian courts have two standardised written languages. Bokmål is a variant closer to Danish, while Nynorsk was created to resemble regional dialects of Norwegian.",
        reference="https://opus.nlpl.eu/index.php",
        type="BitextMining",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["nob-Latn", "nno-Latn"],
        main_score="f1",
        date=("2020-01-01", "2020-12-31"),
        form=["written"],
        domains=["Legal"],
        task_subtypes=[],
        license="CC BY 4.0",
        socioeconomic_status="mixed",
        annotations_creators="human-annotated",
        dialect=[],
        text_creation="found",
        bibtex_citation="""
@inproceedings{opus4,
  title={OPUS-MT — Building open translation services for the World},
  author={Tiedemann, J{\"o}rg and Thottingal, Santhosh},
  booktitle={Proceedings of the 22nd Annual Conference of the European Association for Machine Translation (EAMT)},
  year={2020}
}
""",
        n_samples={"test": 2050},
        avg_character_length={"test": 1884.0},
    )

    def dataset_transform(self):
        """Rename the raw columns to the sentence1/sentence2 pair expected
        by AbsTaskBitextMining, using one batched rename."""
        self.dataset = self.dataset.rename_columns(
            {"nb": "sentence1", "nn": "sentence2"}
        )
testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/multilingual/norwegian_courts_bitext_mining.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from mteb.abstasks import AbsTaskBitextMining
from mteb.abstasks.TaskMetadata import TaskMetadata


class NorwegianCourtsBitextMining(AbsTaskBitextMining):
    """Bitext mining between Norwegian Nynorsk and Bokmål court texts.

    NOTE(review): this module declares the same class name and the same
    dataset path/revision as multilingual/NorwegianCourtsBitextMining.py but
    with different metadata (main_score, n_samples, avg_character_length).
    Registering both will likely collide — confirm which file is canonical.
    """

    metadata = TaskMetadata(
        name="NorwegianCourtsBitextMining",
        dataset={
            "path": "kardosdrur/norwegian-courts",
            "revision": "d79af07e969a6678fcbbe819956840425816468f",
        },
        description="Nynorsk and Bokmål parallel corpus from Norwegian courts. ",
        reference="https://opus.nlpl.eu/ELRC-Courts_Norway-v1.php",
        type="BitextMining",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["nno-Latn", "nob-Latn"],
        main_score="accuracy",
        date=("2000-01-01", "2020-12-31"),  # approximate guess
        # NOTE(review): court documents are presumably written text, yet the
        # form/domains below say "spoken" — confirm against the source corpus.
        form=["spoken"],
        domains=["Spoken", "Legal"],
        task_subtypes=["Dialect pairing"],
        license="openUnder-PSI",  # NOTE(review): nonstandard license tag — verify
        socioeconomic_status="high",
        annotations_creators="derived",  # best guess
        dialect=[],
        text_creation="found",
        bibtex_citation="",
        n_samples={"test": 456},
        avg_character_length={"test": 82.11},
    )

    def dataset_transform(self) -> None:
        # Convert to standard format: AbsTaskBitextMining expects the two
        # sides of each pair in columns "sentence1" and "sentence2".
        self.dataset = self.dataset.rename_column("nb", "sentence1")
        self.dataset = self.dataset.rename_column("nn", "sentence2")
testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/srn/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/VieMedEVBitextMining.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import random
|
| 4 |
+
|
| 5 |
+
import datasets
|
| 6 |
+
|
| 7 |
+
from mteb.abstasks import AbsTaskBitextMining
|
| 8 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 9 |
+
|
| 10 |
+
TEST_SAMPLES = 2048
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class VieMedEVBitextMining(AbsTaskBitextMining):
|
| 14 |
+
metadata = TaskMetadata(
|
| 15 |
+
name="VieMedEVBitextMining",
|
| 16 |
+
dataset={
|
| 17 |
+
"path": "nhuvo/MedEV",
|
| 18 |
+
"revision": "d03c69413bc53d1cea5a5375b3a953c4fee35ecd",
|
| 19 |
+
"trust_remote_code": True,
|
| 20 |
+
},
|
| 21 |
+
description="A high-quality Vietnamese-English parallel data from the medical domain for machine translation",
|
| 22 |
+
reference="https://aclanthology.org/2015.iwslt-evaluation.11/",
|
| 23 |
+
type="BitextMining",
|
| 24 |
+
category="s2s",
|
| 25 |
+
eval_splits=["test"],
|
| 26 |
+
eval_langs=["eng-Latn", "vie-Latn"],
|
| 27 |
+
main_score="f1",
|
| 28 |
+
date=("2024-08-28", "2022-03-28"),
|
| 29 |
+
form=["written"],
|
| 30 |
+
domains=["Medical"],
|
| 31 |
+
task_subtypes=[],
|
| 32 |
+
license="cc-by-nc",
|
| 33 |
+
socioeconomic_status="high",
|
| 34 |
+
annotations_creators="expert-annotated",
|
| 35 |
+
dialect=[],
|
| 36 |
+
text_creation="human-translated and localized",
|
| 37 |
+
bibtex_citation="""@inproceedings{medev,
|
| 38 |
+
title = {{Improving Vietnamese-English Medical Machine Translation}},
|
| 39 |
+
author = {Nhu Vo and Dat Quoc Nguyen and Dung D. Le and Massimo Piccardi and Wray Buntine},
|
| 40 |
+
booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING)},
|
| 41 |
+
year = {2024}
|
| 42 |
+
}""",
|
| 43 |
+
n_samples={"test": TEST_SAMPLES},
|
| 44 |
+
avg_character_length={"test": 139.23},
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
def dataset_transform(self):
|
| 48 |
+
# Convert to standard format
|
| 49 |
+
ds = {}
|
| 50 |
+
seed = 42
|
| 51 |
+
random.seed(seed)
|
| 52 |
+
# Get all texts
|
| 53 |
+
all_texts = self.dataset["test"]["text"]
|
| 54 |
+
|
| 55 |
+
# Determine the midpoint of the list
|
| 56 |
+
mid_index = len(all_texts) // 2
|
| 57 |
+
# Pairs are in two halves
|
| 58 |
+
en_sentences = all_texts[:mid_index]
|
| 59 |
+
vie_sentences = all_texts[mid_index:]
|
| 60 |
+
assert len(en_sentences) == len(
|
| 61 |
+
vie_sentences
|
| 62 |
+
), "The split does not result in equal halves."
|
| 63 |
+
|
| 64 |
+
# Downsample
|
| 65 |
+
indices = list(range(len(en_sentences)))
|
| 66 |
+
random.shuffle(indices)
|
| 67 |
+
sample_indices = indices[:TEST_SAMPLES]
|
| 68 |
+
en_sentences = [en_sentences[i] for i in sample_indices]
|
| 69 |
+
vie_sentences = [vie_sentences[i] for i in sample_indices]
|
| 70 |
+
assert (
|
| 71 |
+
len(en_sentences) == len(vie_sentences) == TEST_SAMPLES
|
| 72 |
+
), f"Exceeded {TEST_SAMPLES} samples for 'test' split."
|
| 73 |
+
|
| 74 |
+
# Return dataset
|
| 75 |
+
ds["test"] = datasets.Dataset.from_dict(
|
| 76 |
+
{"sentence1": vie_sentences, "sentence2": en_sentences}
|
| 77 |
+
)
|
| 78 |
+
self.dataset = datasets.DatasetDict(ds)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/BitextMining/vie/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/__init__.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Aggregate re-exports for every Classification task in mteb.

Importing this package pulls all task classes into the
``mteb.tasks.Classification`` namespace via star imports. Modules are
grouped by ISO 639-3 language-code directory (plus ``multilingual``),
listed alphabetically. Do not reorder: with star imports, a later module
can shadow a name exported by an earlier one.
"""

from __future__ import annotations

from .ara.AJGT import *
from .ara.HotelReviewSentimentClassification import *
from .ara.OnlineStoreReviewSentimentClassification import *
from .ara.RestaurantReviewSentimentClassification import *
from .ara.TweetEmotionClassification import *
from .ara.TweetSarcasmClassification import *
from .ben.BengaliDocumentClassification import *
from .ben.BengaliHateSpeechClassification import *
from .ben.BengaliSentimentAnalysis import *
from .bul.BulgarianStoreReviewSentimentClassfication import *
from .ces.CSFDCZMovieReviewSentimentClassification import *
from .ces.CzechProductReviewSentimentClassification import *
from .ces.CzechSoMeSentimentClassification import *
from .ces.CzechSubjectivityClassification import *
from .dan.AngryTweetsClassification import *
from .dan.DanishPoliticalCommentsClassification import *
from .dan.DKHateClassification import *
from .dan.LccSentimentClassification import *
from .deu.GermanPoliticiansTwitterSentimentClassification import *
from .deu.TenKGnadClassification import *
from .ell.GreekLegalCodeClassification import *
from .eng.AmazonPolarityClassification import *
from .eng.ArxivClassification import *
from .eng.Banking77Classification import *
from .eng.DBpediaClassification import *
from .eng.EmotionClassification import *
from .eng.FinancialPhrasebankClassification import *
from .eng.FrenkEnClassification import *
from .eng.ImdbClassification import *
from .eng.LegalBenchClassification import *
from .eng.NewsClassification import *
from .eng.PatentClassification import *
from .eng.PoemSentimentClassification import *
from .eng.ToxicChatClassification import *
from .eng.ToxicConversationsClassification import *
from .eng.TweetSentimentExtractionClassification import *
from .eng.TweetTopicSingleClassification import *
from .eng.YahooAnswersTopicsClassification import *
from .eng.YelpReviewFullClassification import *
from .est.estonian_valence import *
from .fas.PersianFoodSentimentClassification import *
from .fil.FilipinoHateSpeechClassification import *
from .fil.FilipinoShopeeReviewsClassification import *
from .fin.FinToxicityClassification import *
from .fra.FrenchBookReviews import *
from .fra.MovieReviewSentimentClassification import *
from .guj.GujaratiNewsClassification import *
from .heb.HebrewSentimentAnalysis import *
from .hin.HindiDiscourseClassification import *
from .hin.SentimentAnalysisHindi import *
from .hrv.FrenkHrClassification import *
from .ind.IndonesianIdClickbaitClassification import *
from .ind.IndonesianMongabayConservationClassification import *
from .ita.ItaCaseholdClassification import *
from .ita.ItalianLinguistAcceptabilityClassification import *
from .jav.JavaneseIMDBClassification import *
from .jpn.WRIMEClassification import *
from .kan.KannadaNewsClassification import *
from .kor.KlueTC import *
from .kor.KorFin import *
from .kor.KorHateClassification import *
from .kor.KorSarcasmClassification import *
from .kur.KurdishSentimentClassification import *
from .mal.MalayalamNewsClassification import *
from .mar.MarathiNewsClassification import *
from .mkd.MacedonianTweetSentimentClassification import *
from .multilingual.AfriSentiClassification import *
from .multilingual.AfriSentiLangClassification import *
from .multilingual.AmazonCounterfactualClassification import *
from .multilingual.AmazonReviewsClassification import *
from .multilingual.CataloniaTweetClassification import *
from .multilingual.CyrillicTurkicLangClassification import *
from .multilingual.HinDialectClassification import *
from .multilingual.IndicLangClassification import *
from .multilingual.IndicNLPNewsClassification import *
from .multilingual.IndicSentimentClassification import *
from .multilingual.LanguageClassification import *
from .multilingual.MasakhaNEWSClassification import *
from .multilingual.MassiveIntentClassification import *
from .multilingual.MassiveScenarioClassification import *
from .multilingual.MTOPDomainClassification import *
from .multilingual.MTOPIntentClassification import *
from .multilingual.MultiHateClassification import *
from .multilingual.MultilingualSentimentClassification import *
from .multilingual.NaijaSenti import *
from .multilingual.NordicLangClassification import *
from .multilingual.NusaXSenti import *
from .multilingual.ScalaClassification import *
from .multilingual.SIB200Classification import *
from .multilingual.SouthAfricanLangClassification import *
from .multilingual.SwissJudgementClassification import *
from .multilingual.TurkicClassification import *
from .multilingual.TweetSentimentClassification import *
from .mya.MyanmarNews import *
from .nep.NepaliNewsClassification import *
from .nld.DutchBookReviewSentimentClassification import *
from .nob.NoRecClassification import *
from .nob.NorwegianParliamentClassification import *
from .ory.OdiaNewsClassification import *
from .pan.PunjabiNewsClassification import *
from .pol.PolishClassification import *
from .por.HateSpeechPortugueseClassification import *
from .ron.Moroco import *
from .ron.RomanianReviewsSentiment import *
from .ron.RomanianSentimentClassification import *
from .san.SanskritShlokasClassification import *
from .sin.SinhalaNewsClassification import *
from .sin.SinhalaNewsSourceClassification import *
from .slk.CSFDSKMovieReviewSentimentClassification import *
from .slv.FrenkSlClassification import *
from .spa.SpanishNewsClassification import *
from .spa.SpanishSentimentClassification import *
from .ssw.SiswatiNewsClassification import *
from .svk.SlovakMovieReviewSentimentClassification import *
from .swe.DalajClassification import *
from .swe.SwedishSentimentClassification import *
from .swe.SweRecClassification import *
from .tam.TamilNewsClassification import *
from .tel.TeluguAndhraJyotiNewsClassification import *
from .tha.WisesightSentimentClassification import *
from .tsn.TswanaNewsClassification import *
from .tur.TurkishMovieSentimentClassification import *
from .tur.TurkishProductSentimentClassification import *
from .ukr.UkrFormalityClassification import *
from .urd.UrduRomanSentimentClassification import *
from .vie.VieStudentFeedbackClassification import *
from .zho.CMTEBClassification import *

# Imported explicitly (not via *) so the targeted noqa keeps the
# re-exported-but-unused name from being flagged by the linter.
from .zho.YueOpenriceReviewClassification import (
    YueOpenriceReviewClassification,  # noqa: F401
)
from .zul.IsiZuluNewsClassification import *
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/AJGT.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from __future__ import annotations

from mteb.abstasks import AbsTaskClassification
from mteb.abstasks.TaskMetadata import TaskMetadata


class AJGT(AbsTaskClassification):
    """Binary sentiment classification of Arabic Jordanian tweets (AJGT).

    1,800 tweets in Modern Standard Arabic or Jordanian dialect, annotated
    as positive or negative. Evaluation runs on the "train" split — the
    only split declared here; presumably the upstream dataset ships no
    dedicated test split (TODO confirm).
    """

    metadata = TaskMetadata(
        name="AJGT",
        dataset={
            "path": "komari6/ajgt_twitter_ar",
            "revision": "af3f2fa5462ac461b696cb300d66e07ad366057f",
        },
        description="Arabic Jordanian General Tweets (AJGT) Corpus consisted of 1,800 tweets annotated as positive and negative. Modern Standard Arabic (MSA) or Jordanian dialect.",
        reference="https://link.springer.com/chapter/10.1007/978-3-319-60042-0_66/",
        type="Classification",
        category="s2s",
        eval_splits=["train"],
        eval_langs=["ara-Arab"],
        main_score="accuracy",
        # NOTE(review): this range postdates the 2017 paper cited below;
        # presumably it reflects the HF upload window — confirm.
        date=("2021-01-01", "2022-01-25"),
        form=["written"],
        domains=["Social"],
        task_subtypes=["Sentiment/Hate speech"],
        license="AFL",
        socioeconomic_status="mixed",
        annotations_creators="human-annotated",
        dialect=["ara-arab-MSA", "ara-arab-JO"],
        text_creation="found",
        bibtex_citation="""
@inproceedings{alomari2017arabic,
  title={Arabic tweets sentimental analysis using machine learning},
  author={Alomari, Khaled Mohammad and ElSherif, Hatem M and Shaalan, Khaled},
  booktitle={International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems},
  pages={602--610},
  year={2017},
  organization={Springer}
}
""",
        n_samples={"train": 1800},
        avg_character_length={"train": 46.81},
    )
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/HotelReviewSentimentClassification.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from __future__ import annotations

from mteb.abstasks import AbsTaskClassification
from mteb.abstasks.TaskMetadata import TaskMetadata

# Target size of the "train" split after stratified subsampling; also
# reported in metadata.n_samples so the two values stay in sync.
N_SAMPLES = 2048


class HotelReviewSentimentClassification(AbsTaskClassification):
    """Sentiment classification of Arabic hotel reviews (HARD).

    Reviews were scraped from Booking.com; evaluation runs on a
    stratified subsample of the "train" split.
    """

    metadata = TaskMetadata(
        name="HotelReviewSentimentClassification",
        dataset={
            "path": "Elnagara/hard",
            "revision": "b108d2c32ee4e1f4176ea233e1a5ac17bceb9ef9",
        },
        description="HARD is a dataset of Arabic hotel reviews collected from the Booking.com website.",
        reference="https://link.springer.com/chapter/10.1007/978-3-319-67056-0_3",
        type="Classification",
        category="s2s",
        eval_splits=["train"],
        eval_langs=["ara-Arab"],
        main_score="accuracy",
        date=("2016-06-01", "2016-07-31"),
        form=["written"],
        domains=["Reviews"],
        task_subtypes=["Sentiment/Hate speech"],
        license="Not specified",
        socioeconomic_status="mixed",
        annotations_creators="derived",
        dialect=["ara-arab-EG", "ara-arab-JO", "ara-arab-LB", "ara-arab-SA"],
        text_creation="found",
        bibtex_citation="""
@article{elnagar2018hotel,
  title={Hotel Arabic-reviews dataset construction for sentiment analysis applications},
  author={Elnagar, Ashraf and Khalifa, Yasmin S and Einea, Anas},
  journal={Intelligent natural language processing: Trends and applications},
  pages={35--52},
  year={2018},
  publisher={Springer}
}
""",
        n_samples={"train": N_SAMPLES},
        avg_character_length={"train": 137.2},
    )

    def dataset_transform(self) -> None:
        """Stratified-subsample the train split down to N_SAMPLES examples."""
        # Pass n_samples explicitly instead of relying on the helper's
        # default happening to equal N_SAMPLES, so metadata.n_samples and
        # the actual subsample size cannot silently drift apart.
        self.dataset = self.stratified_subsampling(
            self.dataset,
            seed=self.seed,
            splits=["train"],
            n_samples=N_SAMPLES,
        )
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/TweetSarcasmClassification.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from __future__ import annotations

from mteb.abstasks import AbsTaskClassification
from mteb.abstasks.TaskMetadata import TaskMetadata


class TweetSarcasmClassification(AbsTaskClassification):
    """Binary sarcasm detection on Arabic tweets (ArSarcasm).

    The corpus was built by re-annotating existing Arabic sentiment
    analysis datasets; evaluation runs on the "test" split.
    """

    metadata = TaskMetadata(
        name="TweetSarcasmClassification",
        dataset={
            "path": "iabufarha/ar_sarcasm",
            "revision": "557bf94ac6177cc442f42d0b09b6e4b76e8f47c9",
        },
        description="Arabic sarcasm detection dataset, which was created through the reannotation of available Arabic sentiment analysis datasets.",
        reference="https://aclanthology.org/2020.osact-1.5/",
        type="Classification",
        category="s2s",
        eval_splits=["test"],
        eval_langs=["ara-Arab"],
        main_score="accuracy",
        date=("2020-01-01", "2021-01-01"),
        form=["written"],
        domains=["Social"],
        task_subtypes=["Sentiment/Hate speech"],
        license="MIT",
        socioeconomic_status="mixed",
        annotations_creators="human-annotated",
        dialect=["ara-arab-EG", "ara-arab-LB", "ara-arab-MA", "ara-arab-SA"],
        text_creation="found",
        bibtex_citation="""
@inproceedings{abu-farha-magdy-2020-arabic,
    title = "From {A}rabic Sentiment Analysis to Sarcasm Detection: The {A}r{S}arcasm Dataset",
    author = "Abu Farha, Ibrahim and
      Magdy, Walid",
    editor = "Al-Khalifa, Hend and
      Magdy, Walid and
      Darwish, Kareem and
      Elsayed, Tamer and
      Mubarak, Hamdy",
    booktitle = "Proceedings of the 4th Workshop on Open-Source Arabic Corpora and Processing Tools, with a Shared Task on Offensive Language Detection",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resource Association",
    url = "https://aclanthology.org/2020.osact-1.5",
    pages = "32--39",
    abstract = "Sarcasm is one of the main challenges for sentiment analysis systems. Its complexity comes from the expression of opinion using implicit indirect phrasing. In this paper, we present ArSarcasm, an Arabic sarcasm detection dataset, which was created through the reannotation of available Arabic sentiment analysis datasets. The dataset contains 10,547 tweets, 16{\%} of which are sarcastic. In addition to sarcasm the data was annotated for sentiment and dialects. Our analysis shows the highly subjective nature of these tasks, which is demonstrated by the shift in sentiment labels based on annotators{'} biases. Experiments show the degradation of state-of-the-art sentiment analysers when faced with sarcastic content. Finally, we train a deep learning model for sarcasm detection using BiLSTM. The model achieves an F1 score of 0.46, which shows the challenging nature of the task, and should act as a basic baseline for future research on our dataset.",
    language = "English",
    ISBN = "979-10-95546-51-1",
}
""",
        n_samples={"test": 2110},
        avg_character_length={"test": 102.1},
    )

    def dataset_transform(self) -> None:
        # labels: 0 non-sarcastic, 1 sarcastic
        # Rename the raw columns to the text/label pair expected by
        # AbsTaskClassification.
        self.dataset = self.dataset.rename_columns(
            {"tweet": "text", "sarcasm": "label"}
        )
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ara/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliDocumentClassification.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BengaliDocumentClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="BengaliDocumentClassification",
|
| 10 |
+
description="Dataset for News Classification, categorized with 13 domains.",
|
| 11 |
+
reference="https://aclanthology.org/2023.eacl-main.4",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "dialect-ai/shironaam",
|
| 14 |
+
"revision": "1c6e67433da618073295b7c90f1c55fa8e78f35c",
|
| 15 |
+
},
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["ben-Beng"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2022-05-01", "2023-05-01"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
dialect=[],
|
| 24 |
+
domains=["News"],
|
| 25 |
+
task_subtypes=[],
|
| 26 |
+
license="CC BY-NC-SA 4.0",
|
| 27 |
+
socioeconomic_status="mixed",
|
| 28 |
+
annotations_creators="derived",
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""
|
| 31 |
+
@inproceedings{akash-etal-2023-shironaam,
|
| 32 |
+
title = "Shironaam: {B}engali News Headline Generation using Auxiliary Information",
|
| 33 |
+
author = "Akash, Abu Ubaida and
|
| 34 |
+
Nayeem, Mir Tafseer and
|
| 35 |
+
Shohan, Faisal Tareque and
|
| 36 |
+
Islam, Tanvir",
|
| 37 |
+
booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics",
|
| 38 |
+
month = may,
|
| 39 |
+
year = "2023",
|
| 40 |
+
address = "Dubrovnik, Croatia",
|
| 41 |
+
publisher = "Association for Computational Linguistics",
|
| 42 |
+
url = "https://aclanthology.org/2023.eacl-main.4",
|
| 43 |
+
pages = "52--67"
|
| 44 |
+
}
|
| 45 |
+
""",
|
| 46 |
+
n_samples={"test": 2048},
|
| 47 |
+
avg_character_length={"test": 1658.1},
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
def dataset_transform(self) -> None:
|
| 51 |
+
self.dataset = self.dataset.rename_columns(
|
| 52 |
+
{"article": "text", "category": "label"}
|
| 53 |
+
)
|
| 54 |
+
self.dataset = self.stratified_subsampling(
|
| 55 |
+
self.dataset, seed=self.seed, splits=["test"]
|
| 56 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliHateSpeechClassification.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BengaliHateSpeechClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="BengaliHateSpeechClassification",
|
| 10 |
+
description="The Bengali Hate Speech Dataset is a Bengali-language dataset of news articles collected from various Bengali media sources and categorized based on the type of hate in the text.",
|
| 11 |
+
reference="https://huggingface.co/datasets/bn_hate_speech",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "rezacsedu/bn_hate_speech",
|
| 14 |
+
"revision": "99612296bc093f0720cac7d7cbfcb67eecf1ca2f",
|
| 15 |
+
},
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["train"],
|
| 19 |
+
eval_langs=["ben-Beng"],
|
| 20 |
+
main_score="f1",
|
| 21 |
+
date=("2019-12-01", "2020-04-09"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
dialect=[],
|
| 24 |
+
domains=["News"],
|
| 25 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 26 |
+
license="MIT",
|
| 27 |
+
socioeconomic_status="mixed",
|
| 28 |
+
annotations_creators="expert-annotated",
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@inproceedings{karim2020BengaliNLP,
|
| 31 |
+
title={Classification Benchmarks for Under-resourced Bengali Language based on Multichannel Convolutional-LSTM Network},
|
| 32 |
+
author={Karim, Md. Rezaul and Chakravarti, Bharathi Raja and P. McCrae, John and Cochez, Michael},
|
| 33 |
+
booktitle={7th IEEE International Conference on Data Science and Advanced Analytics (IEEE DSAA,2020)},
|
| 34 |
+
publisher={IEEE},
|
| 35 |
+
year={2020}
|
| 36 |
+
}
|
| 37 |
+
""",
|
| 38 |
+
n_samples={"train": 3418},
|
| 39 |
+
avg_character_length={"train": 103.42},
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
def dataset_transform(self):
|
| 43 |
+
self.dataset = self.stratified_subsampling(
|
| 44 |
+
self.dataset, seed=self.seed, splits=["train"]
|
| 45 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/BengaliSentimentAnalysis.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BengaliSentimentAnalysis(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="BengaliSentimentAnalysis",
|
| 10 |
+
description="dataset contains 3307 Negative reviews and 8500 Positive reviews collected and manually annotated from Youtube Bengali drama.",
|
| 11 |
+
reference="https://data.mendeley.com/datasets/p6zc7krs37/4",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "Akash190104/bengali_sentiment_analysis",
|
| 14 |
+
"revision": "a4b3685b1854cc26c554dda4c7cb918a36a6fb6c",
|
| 15 |
+
},
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["train"],
|
| 19 |
+
eval_langs=["ben-Beng"],
|
| 20 |
+
main_score="f1",
|
| 21 |
+
date=("2020-06-24", "2020-11-26"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
dialect=[],
|
| 24 |
+
domains=["Reviews"],
|
| 25 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 26 |
+
license="CC BY 4.0",
|
| 27 |
+
socioeconomic_status="mixed",
|
| 28 |
+
annotations_creators="human-annotated",
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@inproceedings{sazzed2020cross,
|
| 31 |
+
title={Cross-lingual sentiment classification in low-resource Bengali language},
|
| 32 |
+
author={Sazzed, Salim},
|
| 33 |
+
booktitle={Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020)},
|
| 34 |
+
pages={50--60},
|
| 35 |
+
year={2020}
|
| 36 |
+
}""",
|
| 37 |
+
n_samples={"train": 11807},
|
| 38 |
+
avg_character_length={"train": 69.66},
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
def dataset_transform(self):
|
| 42 |
+
self.dataset = self.stratified_subsampling(
|
| 43 |
+
self.dataset, seed=self.seed, splits=["train"]
|
| 44 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ben/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/BulgarianStoreReviewSentimentClassfication.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BulgarianStoreReviewSentimentClassfication(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="BulgarianStoreReviewSentimentClassfication",
|
| 10 |
+
description="Bulgarian online store review dataset for sentiment classification.",
|
| 11 |
+
reference="https://doi.org/10.7910/DVN/TXIK9P",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "artist/Bulgarian-Online-Store-Feedback-Text-Analysis",
|
| 14 |
+
"revision": "701984d6c6efea0e14a1c7850ef70e464c5577c0",
|
| 15 |
+
},
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
date=("2018-05-14", "2018-05-14"),
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["bul-Cyrl"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
form=["written"],
|
| 23 |
+
domains=["Reviews"],
|
| 24 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 25 |
+
license="cc-by-4.0",
|
| 26 |
+
socioeconomic_status="mixed",
|
| 27 |
+
annotations_creators="human-annotated",
|
| 28 |
+
dialect=[],
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@data{DVN/TXIK9P_2018,
|
| 31 |
+
author = {Georgieva-Trifonova, Tsvetanka and Stefanova, Milena and Kalchev, Stefan},
|
| 32 |
+
publisher = {Harvard Dataverse},
|
| 33 |
+
title = {{Dataset for ``Customer Feedback Text Analysis for Online Stores Reviews in Bulgarian''}},
|
| 34 |
+
year = {2018},
|
| 35 |
+
version = {V1},
|
| 36 |
+
doi = {10.7910/DVN/TXIK9P},
|
| 37 |
+
url = {https://doi.org/10.7910/DVN/TXIK9P}
|
| 38 |
+
}
|
| 39 |
+
""",
|
| 40 |
+
n_samples={"test": 182},
|
| 41 |
+
avg_character_length={"test": 316.7},
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
def dataset_transform(self):
|
| 45 |
+
self.dataset = self.dataset.rename_columns(
|
| 46 |
+
{"Review": "text", "Category": "label"}
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
labels = self.dataset["train"]["label"]
|
| 50 |
+
lab2idx = {lab: idx for idx, lab in enumerate(sorted(set(labels)))}
|
| 51 |
+
|
| 52 |
+
self.dataset = self.dataset.map(
|
| 53 |
+
lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"]
|
| 54 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/bul/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CSFDCZMovieReviewSentimentClassification.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
N_SAMPLES = 2048
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class CSFDCZMovieReviewSentimentClassification(AbsTaskClassification):
|
| 10 |
+
metadata = TaskMetadata(
|
| 11 |
+
name="CSFDCZMovieReviewSentimentClassification",
|
| 12 |
+
description="The dataset contains 30k user reviews from csfd.cz in Czech.",
|
| 13 |
+
reference="https://arxiv.org/abs/2304.01922",
|
| 14 |
+
dataset={
|
| 15 |
+
"path": "fewshot-goes-multilingual/cs_csfd-movie-reviews",
|
| 16 |
+
"revision": "dd2ede6faaea338ef6b1e2966f06808656975a23",
|
| 17 |
+
},
|
| 18 |
+
type="Classification",
|
| 19 |
+
category="s2s",
|
| 20 |
+
date=("2002-06-28", "2020-03-13"),
|
| 21 |
+
eval_splits=["test"],
|
| 22 |
+
eval_langs=["ces-Latn"],
|
| 23 |
+
main_score="accuracy",
|
| 24 |
+
form=["written"],
|
| 25 |
+
domains=["Reviews"],
|
| 26 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 27 |
+
license="CC-BY-SA-4.0",
|
| 28 |
+
socioeconomic_status="mixed",
|
| 29 |
+
annotations_creators="derived",
|
| 30 |
+
dialect=[],
|
| 31 |
+
text_creation="found",
|
| 32 |
+
bibtex_citation="""
|
| 33 |
+
@misc{štefánik2023resources,
|
| 34 |
+
title={Resources and Few-shot Learners for In-context Learning in Slavic Languages},
|
| 35 |
+
author={Michal Štefánik and Marek Kadlčík and Piotr Gramacki and Petr Sojka},
|
| 36 |
+
year={2023},
|
| 37 |
+
eprint={2304.01922},
|
| 38 |
+
archivePrefix={arXiv},
|
| 39 |
+
primaryClass={cs.CL}
|
| 40 |
+
}
|
| 41 |
+
""",
|
| 42 |
+
n_samples={"test": N_SAMPLES},
|
| 43 |
+
avg_character_length={"test": 386.5},
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
@property
|
| 47 |
+
def metadata_dict(self):
|
| 48 |
+
md = super().metadata_dict
|
| 49 |
+
# Increase the samples_per_label in order to improve baseline performance
|
| 50 |
+
md["samples_per_label"] = 20
|
| 51 |
+
return md
|
| 52 |
+
|
| 53 |
+
def dataset_transform(self):
|
| 54 |
+
self.dataset = self.dataset.rename_columns(
|
| 55 |
+
{"comment": "text", "rating_int": "label"}
|
| 56 |
+
)
|
| 57 |
+
self.dataset = self.stratified_subsampling(
|
| 58 |
+
self.dataset, seed=self.seed, splits=["test"], n_samples=N_SAMPLES
|
| 59 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechProductReviewSentimentClassification.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class CzechProductReviewSentimentClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="CzechProductReviewSentimentClassification",
|
| 10 |
+
description="User reviews of products on Czech e-shop Mall.cz with 3 sentiment classes (positive, neutral, negative)",
|
| 11 |
+
reference="https://aclanthology.org/W13-1609/",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "fewshot-goes-multilingual/cs_mall-product-reviews",
|
| 14 |
+
"revision": "2e6fedf42c9c104e83dfd95c3a453721e683e244",
|
| 15 |
+
},
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["ces-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2013-01-01", "2013-06-01"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
dialect=[],
|
| 24 |
+
domains=["Reviews"],
|
| 25 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 26 |
+
license="CC BY-NC-SA 4.0",
|
| 27 |
+
socioeconomic_status="mixed",
|
| 28 |
+
annotations_creators="derived",
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""
|
| 31 |
+
@inproceedings{habernal-etal-2013-sentiment,
|
| 32 |
+
title = "Sentiment Analysis in {C}zech Social Media Using Supervised Machine Learning",
|
| 33 |
+
author = "Habernal, Ivan and
|
| 34 |
+
Pt{\'a}{\v{c}}ek, Tom{\'a}{\v{s}} and
|
| 35 |
+
Steinberger, Josef",
|
| 36 |
+
editor = "Balahur, Alexandra and
|
| 37 |
+
van der Goot, Erik and
|
| 38 |
+
Montoyo, Andres",
|
| 39 |
+
booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis",
|
| 40 |
+
month = jun,
|
| 41 |
+
year = "2013",
|
| 42 |
+
address = "Atlanta, Georgia",
|
| 43 |
+
publisher = "Association for Computational Linguistics",
|
| 44 |
+
url = "https://aclanthology.org/W13-1609",
|
| 45 |
+
pages = "65--74",
|
| 46 |
+
}
|
| 47 |
+
""",
|
| 48 |
+
n_samples={"test": 2048},
|
| 49 |
+
avg_character_length={"test": 153.26},
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
@property
|
| 53 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 54 |
+
metadata_dict = super().metadata_dict
|
| 55 |
+
metadata_dict["n_experiments"] = 10
|
| 56 |
+
metadata_dict["samples_per_label"] = 16
|
| 57 |
+
return metadata_dict
|
| 58 |
+
|
| 59 |
+
def dataset_transform(self) -> None:
|
| 60 |
+
self.dataset = self.dataset.rename_columns(
|
| 61 |
+
{"comment": "text", "rating_str": "label"}
|
| 62 |
+
)
|
| 63 |
+
self.dataset = self.stratified_subsampling(
|
| 64 |
+
self.dataset, seed=self.seed, splits=["test"]
|
| 65 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSoMeSentimentClassification.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class CzechSoMeSentimentClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="CzechSoMeSentimentClassification",
|
| 10 |
+
description="User comments on Facebook",
|
| 11 |
+
reference="https://aclanthology.org/W13-1609/",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "fewshot-goes-multilingual/cs_facebook-comments",
|
| 14 |
+
"revision": "6ced1d87a030915822b087bf539e6d5c658f1988",
|
| 15 |
+
},
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["ces-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2013-01-01", "2013-06-01"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
dialect=[],
|
| 24 |
+
domains=["Reviews"],
|
| 25 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 26 |
+
license="CC BY-NC-SA 4.0",
|
| 27 |
+
socioeconomic_status="mixed",
|
| 28 |
+
annotations_creators="derived",
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""
|
| 31 |
+
@inproceedings{habernal-etal-2013-sentiment,
|
| 32 |
+
title = "Sentiment Analysis in {C}zech Social Media Using Supervised Machine Learning",
|
| 33 |
+
author = "Habernal, Ivan and
|
| 34 |
+
Pt{\'a}{\v{c}}ek, Tom{\'a}{\v{s}} and
|
| 35 |
+
Steinberger, Josef",
|
| 36 |
+
editor = "Balahur, Alexandra and
|
| 37 |
+
van der Goot, Erik and
|
| 38 |
+
Montoyo, Andres",
|
| 39 |
+
booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis",
|
| 40 |
+
month = jun,
|
| 41 |
+
year = "2013",
|
| 42 |
+
address = "Atlanta, Georgia",
|
| 43 |
+
publisher = "Association for Computational Linguistics",
|
| 44 |
+
url = "https://aclanthology.org/W13-1609",
|
| 45 |
+
pages = "65--74",
|
| 46 |
+
}
|
| 47 |
+
""",
|
| 48 |
+
n_samples={"test": 1000},
|
| 49 |
+
avg_character_length={"test": 59.89},
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
@property
|
| 53 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 54 |
+
metadata_dict = super().metadata_dict
|
| 55 |
+
metadata_dict["n_experiments"] = 10
|
| 56 |
+
metadata_dict["samples_per_label"] = 16
|
| 57 |
+
return metadata_dict
|
| 58 |
+
|
| 59 |
+
def dataset_transform(self) -> None:
|
| 60 |
+
self.dataset = self.dataset.rename_columns(
|
| 61 |
+
{"comment": "text", "sentiment_int": "label"}
|
| 62 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/CzechSubjectivityClassification.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class CzechSubjectivityClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="CzechSubjectivityClassification",
|
| 10 |
+
description="An Czech dataset for subjectivity classification.",
|
| 11 |
+
reference="https://arxiv.org/abs/2009.08712",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "pauli31/czech-subjectivity-dataset",
|
| 14 |
+
"revision": "e387ddf167f3eba99936cff89909ed6264f17e1f",
|
| 15 |
+
},
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
date=("2022-04-01", "2022-04-01"),
|
| 19 |
+
eval_splits=["validation", "test"],
|
| 20 |
+
eval_langs=["ces-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
form=["written"],
|
| 23 |
+
domains=["Reviews"],
|
| 24 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 25 |
+
license="Not specified",
|
| 26 |
+
socioeconomic_status="mixed",
|
| 27 |
+
annotations_creators="human-annotated",
|
| 28 |
+
dialect=[],
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@inproceedings{priban-steinberger-2022-czech,
|
| 31 |
+
title = "\{C\}zech Dataset for Cross-lingual Subjectivity Classification",
|
| 32 |
+
author = "P{\v{r}}ib{\'a}{\v{n}}, Pavel and
|
| 33 |
+
Steinberger, Josef",
|
| 34 |
+
booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
|
| 35 |
+
month = jun,
|
| 36 |
+
year = "2022",
|
| 37 |
+
address = "Marseille, France",
|
| 38 |
+
publisher = "European Language Resources Association",
|
| 39 |
+
url = "https://aclanthology.org/2022.lrec-1.148",
|
| 40 |
+
pages = "1381--1391",
|
| 41 |
+
}
|
| 42 |
+
""",
|
| 43 |
+
n_samples={"validation": 500, "test": 2000},
|
| 44 |
+
avg_character_length={"validation": 108.2, "test": 108.3},
|
| 45 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ces/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/AngryTweetsClassification.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class AngryTweetsClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="AngryTweetsClassification",
|
| 10 |
+
dataset={
|
| 11 |
+
"path": "DDSC/angry-tweets",
|
| 12 |
+
"revision": "20b0e6081892e78179356fada741b7afa381443d",
|
| 13 |
+
},
|
| 14 |
+
description="A sentiment dataset with 3 classes (positiv, negativ, neutral) for Danish tweets",
|
| 15 |
+
reference="https://aclanthology.org/2021.nodalida-main.53/",
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["dan-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2021-01-01", "2021-12-31"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
domains=["Social"],
|
| 24 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 25 |
+
license="CC-BY-4.0",
|
| 26 |
+
socioeconomic_status="mixed",
|
| 27 |
+
annotations_creators="human-annotated",
|
| 28 |
+
dialect=[],
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@inproceedings{pauli2021danlp,
|
| 31 |
+
title={DaNLP: An open-source toolkit for Danish Natural Language Processing},
|
| 32 |
+
author={Pauli, Amalie Brogaard and Barrett, Maria and Lacroix, Oph{\'e}lie and Hvingelby, Rasmus},
|
| 33 |
+
booktitle={Proceedings of the 23rd Nordic Conference on Computational Linguistics (NoDaLiDa)},
|
| 34 |
+
pages={460--466},
|
| 35 |
+
year={2021}
|
| 36 |
+
}""",
|
| 37 |
+
n_samples={"test": 1050},
|
| 38 |
+
avg_character_length={"test": 156.1},
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
@property
|
| 42 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 43 |
+
metadata_dict = super().metadata_dict
|
| 44 |
+
metadata_dict["n_experiments"] = 10
|
| 45 |
+
metadata_dict["samples_per_label"] = 16
|
| 46 |
+
return metadata_dict
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DKHateClassification.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class DKHateClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="DKHateClassification",
|
| 10 |
+
dataset={
|
| 11 |
+
"path": "DDSC/dkhate",
|
| 12 |
+
"revision": "59d12749a3c91a186063c7d729ec392fda94681c",
|
| 13 |
+
},
|
| 14 |
+
description="Danish Tweets annotated for Hate Speech either being Offensive or not",
|
| 15 |
+
reference="https://aclanthology.org/2020.lrec-1.430/",
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["dan-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2018-01-01", "2018-12-31"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
domains=["Social"],
|
| 24 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 25 |
+
license="CC-BY-4.0",
|
| 26 |
+
socioeconomic_status="mixed",
|
| 27 |
+
annotations_creators="expert-annotated",
|
| 28 |
+
dialect=[],
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@inproceedings{sigurbergsson-derczynski-2020-offensive,
|
| 31 |
+
title = "Offensive Language and Hate Speech Detection for {D}anish",
|
| 32 |
+
author = "Sigurbergsson, Gudbjartur Ingi and
|
| 33 |
+
Derczynski, Leon",
|
| 34 |
+
editor = "Calzolari, Nicoletta and
|
| 35 |
+
B{\'e}chet, Fr{\'e}d{\'e}ric and
|
| 36 |
+
Blache, Philippe and
|
| 37 |
+
Choukri, Khalid and
|
| 38 |
+
Cieri, Christopher and
|
| 39 |
+
Declerck, Thierry and
|
| 40 |
+
Goggi, Sara and
|
| 41 |
+
Isahara, Hitoshi and
|
| 42 |
+
Maegaard, Bente and
|
| 43 |
+
Mariani, Joseph and
|
| 44 |
+
Mazo, H{\'e}l{\`e}ne and
|
| 45 |
+
Moreno, Asuncion and
|
| 46 |
+
Odijk, Jan and
|
| 47 |
+
Piperidis, Stelios",
|
| 48 |
+
booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
|
| 49 |
+
month = may,
|
| 50 |
+
year = "2020",
|
| 51 |
+
address = "Marseille, France",
|
| 52 |
+
publisher = "European Language Resources Association",
|
| 53 |
+
url = "https://aclanthology.org/2020.lrec-1.430",
|
| 54 |
+
pages = "3498--3508",
|
| 55 |
+
abstract = "The presence of offensive language on social media platforms and the implications this poses is becoming a major concern in modern society. Given the enormous amount of content created every day, automatic methods are required to detect and deal with this type of content. Until now, most of the research has focused on solving the problem for the English language, while the problem is multilingual. We construct a Danish dataset DKhate containing user-generated comments from various social media platforms, and to our knowledge, the first of its kind, annotated for various types and target of offensive language. We develop four automatic classification systems, each designed to work for both the English and the Danish language. In the detection of offensive language in English, the best performing system achieves a macro averaged F1-score of 0.74, and the best performing system for Danish achieves a macro averaged F1-score of 0.70. In the detection of whether or not an offensive post is targeted, the best performing system for English achieves a macro averaged F1-score of 0.62, while the best performing system for Danish achieves a macro averaged F1-score of 0.73. Finally, in the detection of the target type in a targeted offensive post, the best performing system for English achieves a macro averaged F1-score of 0.56, and the best performing system for Danish achieves a macro averaged F1-score of 0.63. Our work for both the English and the Danish language captures the type and targets of offensive language, and present automatic methods for detecting different kinds of offensive language such as hate speech and cyberbullying.",
|
| 56 |
+
language = "English",
|
| 57 |
+
ISBN = "979-10-95546-34-4",
|
| 58 |
+
}""",
|
| 59 |
+
n_samples={"test": 329},
|
| 60 |
+
avg_character_length={"test": 104.0},
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
@property
|
| 64 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 65 |
+
metadata_dict = dict(self.metadata)
|
| 66 |
+
metadata_dict["n_experiments"] = 10
|
| 67 |
+
metadata_dict["samples_per_label"] = 16
|
| 68 |
+
return metadata_dict
|
| 69 |
+
|
| 70 |
+
def dataset_transform(self):
|
| 71 |
+
# convert label to a 0/1 label
|
| 72 |
+
labels = self.dataset["train"]["label"] # type: ignore
|
| 73 |
+
lab2idx = {lab: idx for idx, lab in enumerate(set(labels))}
|
| 74 |
+
self.dataset = self.dataset.map(
|
| 75 |
+
lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"]
|
| 76 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DanishPoliticalCommentsClassification.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class DanishPoliticalCommentsClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="DanishPoliticalCommentsClassification",
|
| 10 |
+
dataset={
|
| 11 |
+
"path": "danish_political_comments",
|
| 12 |
+
"revision": "edbb03726c04a0efab14fc8c3b8b79e4d420e5a1",
|
| 13 |
+
},
|
| 14 |
+
description="A dataset of Danish political comments rated for sentiment",
|
| 15 |
+
reference="https://huggingface.co/datasets/danish_political_comments",
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["train"],
|
| 19 |
+
eval_langs=["dan-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=(
|
| 22 |
+
"2000-01-01",
|
| 23 |
+
"2022-12-31",
|
| 24 |
+
), # Estimated range for the collection of comments
|
| 25 |
+
form=["written"],
|
| 26 |
+
domains=["Social"],
|
| 27 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 28 |
+
license="Not specified",
|
| 29 |
+
socioeconomic_status="mixed",
|
| 30 |
+
annotations_creators="derived",
|
| 31 |
+
dialect=[],
|
| 32 |
+
text_creation="found",
|
| 33 |
+
bibtex_citation="",
|
| 34 |
+
n_samples={"train": 9010},
|
| 35 |
+
avg_character_length={"train": 69.9},
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
@property
|
| 39 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 40 |
+
metadata_dict = dict(self.metadata)
|
| 41 |
+
metadata_dict["n_experiments"] = 10
|
| 42 |
+
metadata_dict["samples_per_label"] = 16
|
| 43 |
+
return metadata_dict
|
| 44 |
+
|
| 45 |
+
def dataset_transform(self):
|
| 46 |
+
self.dataset = self.dataset.rename_column("sentence", "text")
|
| 47 |
+
self.dataset = self.dataset.rename_column("target", "label")
|
| 48 |
+
|
| 49 |
+
# create train and test splits
|
| 50 |
+
self.dataset = self.dataset["train"].train_test_split(0.2, seed=self.seed)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/DdiscoCohesionClassification.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class DdiscoCohesionClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="Ddisco",
|
| 10 |
+
dataset={
|
| 11 |
+
"path": "DDSC/ddisco",
|
| 12 |
+
"revision": "514ab557579fcfba538a4078d6d647248a0e6eb7",
|
| 13 |
+
},
|
| 14 |
+
description="A Danish Discourse dataset with values for coherence and source (Wikipedia or Reddit)",
|
| 15 |
+
reference="https://aclanthology.org/2022.lrec-1.260/",
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["dan-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2021-01-01", "2022-06-25"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
domains=["Non-fiction", "Social"],
|
| 24 |
+
dialect=[],
|
| 25 |
+
task_subtypes=["Discourse coherence"],
|
| 26 |
+
license="cc-by-sa-3.0",
|
| 27 |
+
socioeconomic_status="high",
|
| 28 |
+
annotations_creators="expert-annotated",
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""
|
| 31 |
+
@inproceedings{flansmose-mikkelsen-etal-2022-ddisco,
|
| 32 |
+
title = "{DD}is{C}o: A Discourse Coherence Dataset for {D}anish",
|
| 33 |
+
author = "Flansmose Mikkelsen, Linea and
|
| 34 |
+
Kinch, Oliver and
|
| 35 |
+
Jess Pedersen, Anders and
|
| 36 |
+
Lacroix, Oph{\'e}lie",
|
| 37 |
+
editor = "Calzolari, Nicoletta and
|
| 38 |
+
B{\'e}chet, Fr{\'e}d{\'e}ric and
|
| 39 |
+
Blache, Philippe and
|
| 40 |
+
Choukri, Khalid and
|
| 41 |
+
Cieri, Christopher and
|
| 42 |
+
Declerck, Thierry and
|
| 43 |
+
Goggi, Sara and
|
| 44 |
+
Isahara, Hitoshi and
|
| 45 |
+
Maegaard, Bente and
|
| 46 |
+
Mariani, Joseph and
|
| 47 |
+
Mazo, H{\'e}l{\`e}ne and
|
| 48 |
+
Odijk, Jan and
|
| 49 |
+
Piperidis, Stelios",
|
| 50 |
+
booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
|
| 51 |
+
month = jun,
|
| 52 |
+
year = "2022",
|
| 53 |
+
address = "Marseille, France",
|
| 54 |
+
publisher = "European Language Resources Association",
|
| 55 |
+
url = "https://aclanthology.org/2022.lrec-1.260",
|
| 56 |
+
pages = "2440--2445",
|
| 57 |
+
abstract = "To date, there has been no resource for studying discourse coherence on real-world Danish texts. Discourse coherence has mostly been approached with the assumption that incoherent texts can be represented by coherent texts in which sentences have been shuffled. However, incoherent real-world texts rarely resemble that. We thus present DDisCo, a dataset including text from the Danish Wikipedia and Reddit annotated for discourse coherence. We choose to annotate real-world texts instead of relying on artificially incoherent text for training and testing models. Then, we evaluate the performance of several methods, including neural networks, on the dataset.",
|
| 58 |
+
}
|
| 59 |
+
""",
|
| 60 |
+
n_samples=None,
|
| 61 |
+
avg_character_length=None,
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
def dataset_transform(self):
|
| 65 |
+
self.dataset = self.dataset.rename_columns({"rating": "label"}).remove_columns(
|
| 66 |
+
["domain"]
|
| 67 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/LccSentimentClassification.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class LccSentimentClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="LccSentimentClassification",
|
| 10 |
+
dataset={
|
| 11 |
+
"path": "DDSC/lcc",
|
| 12 |
+
"revision": "de7ba3406ee55ea2cc52a0a41408fa6aede6d3c6",
|
| 13 |
+
},
|
| 14 |
+
description="The leipzig corpora collection, annotated for sentiment",
|
| 15 |
+
reference="https://github.com/fnielsen/lcc-sentiment",
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["dan-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2006-01-01", "2006-12-31"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
domains=["News", "Web"],
|
| 24 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 25 |
+
license="CC-BY-4.0",
|
| 26 |
+
socioeconomic_status="mixed",
|
| 27 |
+
annotations_creators="expert-annotated",
|
| 28 |
+
dialect=[],
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@inproceedings{quasthoff-etal-2006-corpus,
|
| 31 |
+
title = "Corpus Portal for Search in Monolingual Corpora",
|
| 32 |
+
author = "Quasthoff, Uwe and
|
| 33 |
+
Richter, Matthias and
|
| 34 |
+
Biemann, Christian",
|
| 35 |
+
editor = "Calzolari, Nicoletta and
|
| 36 |
+
Choukri, Khalid and
|
| 37 |
+
Gangemi, Aldo and
|
| 38 |
+
Maegaard, Bente and
|
| 39 |
+
Mariani, Joseph and
|
| 40 |
+
Odijk, Jan and
|
| 41 |
+
Tapias, Daniel",
|
| 42 |
+
booktitle = "Proceedings of the Fifth International Conference on Language Resources and Evaluation ({LREC}{'}06)",
|
| 43 |
+
month = may,
|
| 44 |
+
year = "2006",
|
| 45 |
+
address = "Genoa, Italy",
|
| 46 |
+
publisher = "European Language Resources Association (ELRA)",
|
| 47 |
+
url = "http://www.lrec-conf.org/proceedings/lrec2006/pdf/641_pdf.pdf",
|
| 48 |
+
abstract = "A simple and flexible schema for storing and presenting monolingual language resources is proposed. In this format, data for 18 different languages is already available in various sizes. The data is provided free of charge for online use and download. The main target is to ease the application of algorithms for monolingual and interlingual studies.",
|
| 49 |
+
}""",
|
| 50 |
+
n_samples={"test": 150},
|
| 51 |
+
avg_character_length={"test": 118.7},
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
@property
|
| 55 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 56 |
+
metadata_dict = super().metadata_dict
|
| 57 |
+
metadata_dict["n_experiments"] = 10
|
| 58 |
+
metadata_dict["samples_per_label"] = 16
|
| 59 |
+
return metadata_dict
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/dan/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/GermanPoliticiansTwitterSentimentClassification.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class GermanPoliticiansTwitterSentimentClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="GermanPoliticiansTwitterSentimentClassification",
|
| 11 |
+
description="GermanPoliticiansTwitterSentiment is a dataset of German tweets categorized with their sentiment (3 classes).",
|
| 12 |
+
reference="https://aclanthology.org/2022.konvens-1.9",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "Alienmaster/german_politicians_twitter_sentiment",
|
| 15 |
+
"revision": "65343b17f5a76227ab2e15b9424dfab6466ffcb1",
|
| 16 |
+
},
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["deu-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=("2021-01-01", "2021-12-31"),
|
| 23 |
+
form=["written"],
|
| 24 |
+
domains=["Social", "Government"],
|
| 25 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 26 |
+
license="Not specified",
|
| 27 |
+
socioeconomic_status="high",
|
| 28 |
+
annotations_creators="human-annotated",
|
| 29 |
+
dialect=[],
|
| 30 |
+
text_creation="found",
|
| 31 |
+
bibtex_citation="""
|
| 32 |
+
@inproceedings{schmidt-etal-2022-sentiment,
|
| 33 |
+
title = "Sentiment Analysis on {T}witter for the Major {G}erman Parties during the 2021 {G}erman Federal Election",
|
| 34 |
+
author = "Schmidt, Thomas and
|
| 35 |
+
Fehle, Jakob and
|
| 36 |
+
Weissenbacher, Maximilian and
|
| 37 |
+
Richter, Jonathan and
|
| 38 |
+
Gottschalk, Philipp and
|
| 39 |
+
Wolff, Christian",
|
| 40 |
+
editor = "Schaefer, Robin and
|
| 41 |
+
Bai, Xiaoyu and
|
| 42 |
+
Stede, Manfred and
|
| 43 |
+
Zesch, Torsten",
|
| 44 |
+
booktitle = "Proceedings of the 18th Conference on Natural Language Processing (KONVENS 2022)",
|
| 45 |
+
month = "12--15 " # sep,
|
| 46 |
+
year = "2022",
|
| 47 |
+
address = "Potsdam, Germany",
|
| 48 |
+
publisher = "KONVENS 2022 Organizers",
|
| 49 |
+
url = "https://aclanthology.org/2022.konvens-1.9",
|
| 50 |
+
pages = "74--87",
|
| 51 |
+
}
|
| 52 |
+
""",
|
| 53 |
+
n_samples={"test": 357},
|
| 54 |
+
avg_character_length={"test": 302.48},
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
def dataset_transform(self):
|
| 58 |
+
self.dataset = self.dataset.rename_column("majority_sentiment", "label")
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/TenKGnadClassification.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class TenKGnadClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="TenKGnadClassification",
|
| 11 |
+
description="10k German News Articles Dataset (10kGNAD) contains news articles from the online Austrian newspaper website DER Standard with their topic classification (9 classes).",
|
| 12 |
+
reference="https://tblock.github.io/10kGNAD/",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "gnad10",
|
| 15 |
+
"revision": "0798affe9b3f88cfda4267b6fbc50fac67046ee5",
|
| 16 |
+
},
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="p2p",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["deu-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=("2015-06-01", "2016-05-31"),
|
| 23 |
+
form=["written"],
|
| 24 |
+
domains=["News"],
|
| 25 |
+
task_subtypes=["Topic classification"],
|
| 26 |
+
license="cc-by-nc-sa-4.0",
|
| 27 |
+
socioeconomic_status="medium",
|
| 28 |
+
annotations_creators="expert-annotated",
|
| 29 |
+
dialect=[],
|
| 30 |
+
text_creation="found",
|
| 31 |
+
bibtex_citation="""
|
| 32 |
+
@InProceedings{Schabus2017,
|
| 33 |
+
Author = {Dietmar Schabus and Marcin Skowron and Martin Trapp},
|
| 34 |
+
Title = {One Million Posts: A Data Set of German Online Discussions},
|
| 35 |
+
Booktitle = {Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR)},
|
| 36 |
+
Pages = {1241--1244},
|
| 37 |
+
Year = {2017},
|
| 38 |
+
Address = {Tokyo, Japan},
|
| 39 |
+
Doi = {10.1145/3077136.3080711},
|
| 40 |
+
Month = aug
|
| 41 |
+
}
|
| 42 |
+
""",
|
| 43 |
+
n_samples={"test": 1028},
|
| 44 |
+
avg_character_length={"test": 2627.31},
|
| 45 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/deu/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/ell/GreekLegalCodeClassification.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
TEST_SAMPLES = 2048
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class GreekLegalCodeClassification(AbsTaskClassification):
|
| 10 |
+
metadata = TaskMetadata(
|
| 11 |
+
name="GreekLegalCodeClassification",
|
| 12 |
+
description="Greek Legal Code Dataset for Classification. (subset = chapter)",
|
| 13 |
+
reference="https://arxiv.org/abs/2109.15298",
|
| 14 |
+
dataset={
|
| 15 |
+
"path": "AI-team-UoA/greek_legal_code",
|
| 16 |
+
"revision": "de0fdb34424f07d1ac6f0ede23ee0ed44bd9f5d1",
|
| 17 |
+
"name": "chapter",
|
| 18 |
+
},
|
| 19 |
+
type="Classification",
|
| 20 |
+
category="s2s",
|
| 21 |
+
date=("2021-01-01", "2021-01-01"),
|
| 22 |
+
eval_splits=["validation", "test"],
|
| 23 |
+
eval_langs=["ell-Grek"],
|
| 24 |
+
main_score="accuracy",
|
| 25 |
+
form=["written"],
|
| 26 |
+
domains=["Legal"],
|
| 27 |
+
task_subtypes=["Topic classification"],
|
| 28 |
+
license="cc-by-4.0",
|
| 29 |
+
socioeconomic_status="high",
|
| 30 |
+
annotations_creators="human-annotated",
|
| 31 |
+
dialect=[],
|
| 32 |
+
text_creation="found",
|
| 33 |
+
bibtex_citation="""@inproceedings{papaloukas-etal-2021-glc,
|
| 34 |
+
title = "Multi-granular Legal Topic Classification on Greek Legislation",
|
| 35 |
+
author = "Papaloukas, Christos and Chalkidis, Ilias and Athinaios, Konstantinos and Pantazi, Despina-Athanasia and Koubarakis, Manolis",
|
| 36 |
+
booktitle = "Proceedings of the Natural Legal Language Processing Workshop 2021",
|
| 37 |
+
year = "2021",
|
| 38 |
+
address = "Punta Cana, Dominican Republic",
|
| 39 |
+
publisher = "Association for Computational Linguistics",
|
| 40 |
+
url = "https://arxiv.org/abs/2109.15298",
|
| 41 |
+
doi = "10.48550/arXiv.2109.15298",
|
| 42 |
+
pages = "63--75"
|
| 43 |
+
}
|
| 44 |
+
""",
|
| 45 |
+
n_samples={"validation": TEST_SAMPLES, "test": TEST_SAMPLES},
|
| 46 |
+
avg_character_length={"validation": 4046.8, "test": 4200.8},
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
def dataset_transform(self):
|
| 50 |
+
self.dataset["validation"] = (
|
| 51 |
+
self.dataset["validation"]
|
| 52 |
+
.shuffle(seed=self.seed)
|
| 53 |
+
.select(range(TEST_SAMPLES))
|
| 54 |
+
)
|
| 55 |
+
self.dataset["test"] = (
|
| 56 |
+
self.dataset["test"].shuffle(seed=self.seed).select(range(TEST_SAMPLES))
|
| 57 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/AmazonPolarityClassification.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class AmazonPolarityClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="AmazonPolarityClassification",
|
| 11 |
+
description="Amazon Polarity Classification Dataset.",
|
| 12 |
+
reference="https://huggingface.co/datasets/amazon_polarity",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "mteb/amazon_polarity",
|
| 15 |
+
"revision": "e2d317d38cd51312af73b3d32a06d1a08b442046",
|
| 16 |
+
},
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=(
|
| 23 |
+
"2012-01-01",
|
| 24 |
+
"2015-12-31",
|
| 25 |
+
), # Estimated range for the collection of reviews
|
| 26 |
+
form=["written"],
|
| 27 |
+
domains=["Reviews"],
|
| 28 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 29 |
+
license="Not specified",
|
| 30 |
+
socioeconomic_status="mixed",
|
| 31 |
+
annotations_creators="derived",
|
| 32 |
+
dialect=[],
|
| 33 |
+
text_creation="found",
|
| 34 |
+
bibtex_citation="""@article{McAuley2013HiddenFA,
|
| 35 |
+
title={Hidden factors and hidden topics: understanding rating dimensions with review text},
|
| 36 |
+
author={Julian McAuley and Jure Leskovec},
|
| 37 |
+
journal={Proceedings of the 7th ACM conference on Recommender systems},
|
| 38 |
+
year={2013},
|
| 39 |
+
url={https://api.semanticscholar.org/CorpusID:6440341}
|
| 40 |
+
}""",
|
| 41 |
+
n_samples={"test": 400000},
|
| 42 |
+
avg_character_length={"test": 431.4},
|
| 43 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ArxivClassification.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ArxivClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="ArxivClassification",
|
| 11 |
+
description="Classification Dataset of Arxiv Papers",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "ccdv/arxiv-classification",
|
| 14 |
+
"revision": "f9bd92144ed76200d6eb3ce73a8bd4eba9ffdc85",
|
| 15 |
+
},
|
| 16 |
+
reference="https://ieeexplore.ieee.org/document/8675939",
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=("1998-11-11", "2019-03-28"),
|
| 23 |
+
form=["written"],
|
| 24 |
+
domains=["Academic"],
|
| 25 |
+
task_subtypes=["Topic classification"],
|
| 26 |
+
license="Not specified",
|
| 27 |
+
socioeconomic_status="high",
|
| 28 |
+
annotations_creators="derived",
|
| 29 |
+
dialect=[],
|
| 30 |
+
text_creation="found",
|
| 31 |
+
bibtex_citation="""@ARTICLE{8675939,
|
| 32 |
+
author={He, Jun and Wang, Liqun and Liu, Liu and Feng, Jiao and Wu, Hao},
|
| 33 |
+
journal={IEEE Access},
|
| 34 |
+
title={Long Document Classification From Local Word Glimpses via Recurrent Attention Learning},
|
| 35 |
+
year={2019},
|
| 36 |
+
volume={7},
|
| 37 |
+
number={},
|
| 38 |
+
pages={40707-40718},
|
| 39 |
+
doi={10.1109/ACCESS.2019.2907992}
|
| 40 |
+
}""",
|
| 41 |
+
n_samples={"test": 2048},
|
| 42 |
+
avg_character_length={},
|
| 43 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/Banking77Classification.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Banking77Classification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="Banking77Classification",
|
| 11 |
+
description="Dataset composed of online banking queries annotated with their corresponding intents.",
|
| 12 |
+
reference="https://arxiv.org/abs/2003.04807",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "mteb/banking77",
|
| 15 |
+
"revision": "0fd18e25b25c072e09e0d92ab615fda904d66300",
|
| 16 |
+
},
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=(
|
| 23 |
+
"2019-01-01",
|
| 24 |
+
"2019-12-31",
|
| 25 |
+
), # Estimated range for the collection of queries
|
| 26 |
+
form=["written"],
|
| 27 |
+
domains=[],
|
| 28 |
+
task_subtypes=[],
|
| 29 |
+
license="MIT",
|
| 30 |
+
socioeconomic_status="mixed",
|
| 31 |
+
annotations_creators="human-annotated",
|
| 32 |
+
dialect=[],
|
| 33 |
+
text_creation="found",
|
| 34 |
+
bibtex_citation="""@inproceedings{casanueva-etal-2020-efficient,
|
| 35 |
+
title = "Efficient Intent Detection with Dual Sentence Encoders",
|
| 36 |
+
author = "Casanueva, I{\~n}igo and
|
| 37 |
+
Tem{\v{c}}inas, Tadas and
|
| 38 |
+
Gerz, Daniela and
|
| 39 |
+
Henderson, Matthew and
|
| 40 |
+
Vuli{\'c}, Ivan",
|
| 41 |
+
editor = "Wen, Tsung-Hsien and
|
| 42 |
+
Celikyilmaz, Asli and
|
| 43 |
+
Yu, Zhou and
|
| 44 |
+
Papangelis, Alexandros and
|
| 45 |
+
Eric, Mihail and
|
| 46 |
+
Kumar, Anuj and
|
| 47 |
+
Casanueva, I{\~n}igo and
|
| 48 |
+
Shah, Rushin",
|
| 49 |
+
booktitle = "Proceedings of the 2nd Workshop on Natural Language Processing for Conversational AI",
|
| 50 |
+
month = jul,
|
| 51 |
+
year = "2020",
|
| 52 |
+
address = "Online",
|
| 53 |
+
publisher = "Association for Computational Linguistics",
|
| 54 |
+
url = "https://aclanthology.org/2020.nlp4convai-1.5",
|
| 55 |
+
doi = "10.18653/v1/2020.nlp4convai-1.5",
|
| 56 |
+
pages = "38--45",
|
| 57 |
+
}""",
|
| 58 |
+
n_samples={"test": 3080},
|
| 59 |
+
avg_character_length={"test": 54.2},
|
| 60 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/DBpediaClassification.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class DBpediaClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="DBpediaClassification",
|
| 11 |
+
description="DBpedia14 is a dataset of English texts from Wikipedia articles, categorized into 14 non-overlapping classes based on their DBpedia ontology.",
|
| 12 |
+
reference="https://arxiv.org/abs/1509.01626",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "fancyzhx/dbpedia_14",
|
| 15 |
+
"revision": "9abd46cf7fc8b4c64290f26993c540b92aa145ac",
|
| 16 |
+
},
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=("2022-01-25", "2022-01-25"),
|
| 23 |
+
form=["written"],
|
| 24 |
+
domains=["Encyclopaedic"],
|
| 25 |
+
task_subtypes=["Topic classification"],
|
| 26 |
+
license="cc-by-sa-3.0",
|
| 27 |
+
socioeconomic_status="low",
|
| 28 |
+
annotations_creators="derived",
|
| 29 |
+
dialect=[],
|
| 30 |
+
text_creation="found",
|
| 31 |
+
bibtex_citation="""
|
| 32 |
+
@inproceedings{NIPS2015_250cf8b5,
|
| 33 |
+
author = {Zhang, Xiang and Zhao, Junbo and LeCun, Yann},
|
| 34 |
+
booktitle = {Advances in Neural Information Processing Systems},
|
| 35 |
+
editor = {C. Cortes and N. Lawrence and D. Lee and M. Sugiyama and R. Garnett},
|
| 36 |
+
pages = {},
|
| 37 |
+
publisher = {Curran Associates, Inc.},
|
| 38 |
+
title = {Character-level Convolutional Networks for Text Classification},
|
| 39 |
+
url = {https://proceedings.neurips.cc/paper_files/paper/2015/file/250cf8b51c773f3f8dc8b4be867a9a02-Paper.pdf},
|
| 40 |
+
volume = {28},
|
| 41 |
+
year = {2015}
|
| 42 |
+
}
|
| 43 |
+
""",
|
| 44 |
+
n_samples={"test": 70000},
|
| 45 |
+
avg_character_length={"test": 281.40},
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
def dataset_transform(self):
|
| 49 |
+
self.dataset = self.dataset.rename_column("content", "text")
|
| 50 |
+
self.dataset = self.stratified_subsampling(
|
| 51 |
+
self.dataset, seed=self.seed, splits=["train", "test"]
|
| 52 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/EmotionClassification.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class EmotionClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="EmotionClassification",
|
| 11 |
+
description="Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise.",
|
| 12 |
+
reference="https://www.aclweb.org/anthology/D18-1404",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "mteb/emotion",
|
| 15 |
+
"revision": "4f58c6b202a23cf9a4da393831edf4f9183cad37",
|
| 16 |
+
},
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["validation", "test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=(
|
| 23 |
+
"2017-01-01",
|
| 24 |
+
"2018-12-31",
|
| 25 |
+
), # Estimated range for the collection of Twitter messages
|
| 26 |
+
form=["written"],
|
| 27 |
+
domains=["Social"],
|
| 28 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 29 |
+
license="Not specified",
|
| 30 |
+
socioeconomic_status="mixed",
|
| 31 |
+
annotations_creators="human-annotated",
|
| 32 |
+
dialect=[],
|
| 33 |
+
text_creation="found",
|
| 34 |
+
bibtex_citation="""@inproceedings{saravia-etal-2018-carer,
|
| 35 |
+
title = "{CARER}: Contextualized Affect Representations for Emotion Recognition",
|
| 36 |
+
author = "Saravia, Elvis and
|
| 37 |
+
Liu, Hsien-Chi Toby and
|
| 38 |
+
Huang, Yen-Hao and
|
| 39 |
+
Wu, Junlin and
|
| 40 |
+
Chen, Yi-Shin",
|
| 41 |
+
editor = "Riloff, Ellen and
|
| 42 |
+
Chiang, David and
|
| 43 |
+
Hockenmaier, Julia and
|
| 44 |
+
Tsujii, Jun{'}ichi",
|
| 45 |
+
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
|
| 46 |
+
month = oct # "-" # nov,
|
| 47 |
+
year = "2018",
|
| 48 |
+
address = "Brussels, Belgium",
|
| 49 |
+
publisher = "Association for Computational Linguistics",
|
| 50 |
+
url = "https://aclanthology.org/D18-1404",
|
| 51 |
+
doi = "10.18653/v1/D18-1404",
|
| 52 |
+
pages = "3687--3697",
|
| 53 |
+
abstract = "Emotions are expressed in nuanced ways, which varies by collective or individual experiences, knowledge, and beliefs. Therefore, to understand emotion, as conveyed through text, a robust mechanism capable of capturing and modeling different linguistic nuances and phenomena is needed. We propose a semi-supervised, graph-based algorithm to produce rich structural descriptors which serve as the building blocks for constructing contextualized affect representations from text. The pattern-based representations are further enriched with word embeddings and evaluated through several emotion recognition tasks. Our experimental results demonstrate that the proposed method outperforms state-of-the-art techniques on emotion recognition tasks.",
|
| 54 |
+
}""",
|
| 55 |
+
n_samples={"validation": 2000, "test": 2000},
|
| 56 |
+
avg_character_length={"validation": 95.3, "test": 95.6},
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
@property
|
| 60 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 61 |
+
metadata_dict = super().metadata_dict
|
| 62 |
+
metadata_dict["n_experiments"] = 10
|
| 63 |
+
metadata_dict["samples_per_label"] = 16
|
| 64 |
+
return metadata_dict
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FinancialPhrasebankClassification.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class FinancialPhrasebankClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="FinancialPhrasebankClassification",
|
| 11 |
+
description="Polar sentiment dataset of sentences from financial news, categorized by sentiment into positive, negative, or neutral.",
|
| 12 |
+
reference="https://arxiv.org/abs/1307.5336",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "takala/financial_phrasebank",
|
| 15 |
+
"revision": "1484d06fe7af23030c7c977b12556108d1f67039",
|
| 16 |
+
"name": "sentences_allagree",
|
| 17 |
+
},
|
| 18 |
+
type="Classification",
|
| 19 |
+
category="s2s",
|
| 20 |
+
eval_splits=["train"],
|
| 21 |
+
eval_langs=["eng-Latn"],
|
| 22 |
+
main_score="accuracy",
|
| 23 |
+
date=("2013-11-01", "2013-11-01"),
|
| 24 |
+
form=["written"],
|
| 25 |
+
domains=["News"],
|
| 26 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 27 |
+
license="cc-by-nc-sa-3.0",
|
| 28 |
+
socioeconomic_status="medium",
|
| 29 |
+
annotations_creators="expert-annotated",
|
| 30 |
+
dialect=[],
|
| 31 |
+
text_creation="found",
|
| 32 |
+
bibtex_citation="""
|
| 33 |
+
@article{Malo2014GoodDO,
|
| 34 |
+
title={Good debt or bad debt: Detecting semantic orientations in economic texts},
|
| 35 |
+
author={P. Malo and A. Sinha and P. Korhonen and J. Wallenius and P. Takala},
|
| 36 |
+
journal={Journal of the Association for Information Science and Technology},
|
| 37 |
+
year={2014},
|
| 38 |
+
volume={65}
|
| 39 |
+
}
|
| 40 |
+
""",
|
| 41 |
+
n_samples={"train": 4840},
|
| 42 |
+
avg_character_length={"train": 121.96},
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
def dataset_transform(self):
|
| 46 |
+
self.dataset = self.dataset.rename_column("sentence", "text")
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/FrenkEnClassification.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class FrenkEnClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="FrenkEnClassification",
|
| 10 |
+
description="English subset of the FRENK dataset",
|
| 11 |
+
dataset={
|
| 12 |
+
"path": "classla/FRENK-hate-en",
|
| 13 |
+
"revision": "52483dba0ff23291271ee9249839865e3c3e7e50",
|
| 14 |
+
},
|
| 15 |
+
reference="https://arxiv.org/abs/1906.02045",
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["eng-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2021-05-28", "2021-05-28"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
domains=["Social"],
|
| 24 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 25 |
+
license="Not specified",
|
| 26 |
+
socioeconomic_status="low",
|
| 27 |
+
annotations_creators="derived",
|
| 28 |
+
dialect=[],
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@misc{ljubešić2019frenk,
|
| 31 |
+
title={The FRENK Datasets of Socially Unacceptable Discourse in Slovene and English},
|
| 32 |
+
author={Nikola Ljubešić and Darja Fišer and Tomaž Erjavec},
|
| 33 |
+
year={2019},
|
| 34 |
+
eprint={1906.02045},
|
| 35 |
+
archivePrefix={arXiv},
|
| 36 |
+
primaryClass={cs.CL},
|
| 37 |
+
url={https://arxiv.org/abs/1906.02045}
|
| 38 |
+
}""",
|
| 39 |
+
n_samples={"test": 2300},
|
| 40 |
+
avg_character_length={"test": 188.75},
|
| 41 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ImdbClassification.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ImdbClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="ImdbClassification",
|
| 11 |
+
description="Large Movie Review Dataset",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "mteb/imdb",
|
| 14 |
+
"revision": "3d86128a09e091d6018b6d26cad27f2739fc2db7",
|
| 15 |
+
},
|
| 16 |
+
reference="http://www.aclweb.org/anthology/P11-1015",
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="p2p",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=(
|
| 23 |
+
"2000-01-01",
|
| 24 |
+
"2010-12-31",
|
| 25 |
+
), # Estimated range for the collection of movie reviews
|
| 26 |
+
form=["written"],
|
| 27 |
+
domains=["Reviews"],
|
| 28 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 29 |
+
license="Not specified",
|
| 30 |
+
socioeconomic_status="mixed",
|
| 31 |
+
annotations_creators="derived",
|
| 32 |
+
dialect=[],
|
| 33 |
+
text_creation="found",
|
| 34 |
+
bibtex_citation="""@inproceedings{maas-etal-2011-learning,
|
| 35 |
+
title = "Learning Word Vectors for Sentiment Analysis",
|
| 36 |
+
author = "Maas, Andrew L. and
|
| 37 |
+
Daly, Raymond E. and
|
| 38 |
+
Pham, Peter T. and
|
| 39 |
+
Huang, Dan and
|
| 40 |
+
Ng, Andrew Y. and
|
| 41 |
+
Potts, Christopher",
|
| 42 |
+
editor = "Lin, Dekang and
|
| 43 |
+
Matsumoto, Yuji and
|
| 44 |
+
Mihalcea, Rada",
|
| 45 |
+
booktitle = "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
|
| 46 |
+
month = jun,
|
| 47 |
+
year = "2011",
|
| 48 |
+
address = "Portland, Oregon, USA",
|
| 49 |
+
publisher = "Association for Computational Linguistics",
|
| 50 |
+
url = "https://aclanthology.org/P11-1015",
|
| 51 |
+
pages = "142--150",
|
| 52 |
+
}""",
|
| 53 |
+
n_samples={"test": 25000},
|
| 54 |
+
avg_character_length={"test": 1293.8},
|
| 55 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/LegalBenchClassification.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/NewsClassification.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class NewsClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="NewsClassification",
|
| 11 |
+
description="Large News Classification Dataset",
|
| 12 |
+
dataset={
|
| 13 |
+
"path": "fancyzhx/ag_news",
|
| 14 |
+
"revision": "eb185aade064a813bc0b7f42de02595523103ca4",
|
| 15 |
+
},
|
| 16 |
+
reference="https://arxiv.org/abs/1509.01626",
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=(
|
| 23 |
+
"2004-01-01",
|
| 24 |
+
"2015-12-31",
|
| 25 |
+
), # Estimated range for the collection of news articles
|
| 26 |
+
form=["written"],
|
| 27 |
+
domains=["News"],
|
| 28 |
+
task_subtypes=["Topic classification"],
|
| 29 |
+
license="Apache 2.0",
|
| 30 |
+
socioeconomic_status="medium",
|
| 31 |
+
annotations_creators="expert-annotated",
|
| 32 |
+
dialect=["eng-Latn-US", "en-Latn-GB", "en-Latn-AU"],
|
| 33 |
+
text_creation="found",
|
| 34 |
+
bibtex_citation="",
|
| 35 |
+
n_samples={"test": 7600},
|
| 36 |
+
avg_character_length={"test": 235.29},
|
| 37 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/PatentClassification.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class PatentClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="PatentClassification",
|
| 10 |
+
description="Classification Dataset of Patents and Abstract",
|
| 11 |
+
dataset={
|
| 12 |
+
"path": "ccdv/patent-classification",
|
| 13 |
+
"revision": "2f38a1dfdecfacee0184d74eaeafd3c0fb49d2a6",
|
| 14 |
+
},
|
| 15 |
+
reference="https://aclanthology.org/P19-1212.pdf",
|
| 16 |
+
type="Classification",
|
| 17 |
+
category="s2s",
|
| 18 |
+
eval_splits=["test"],
|
| 19 |
+
eval_langs=["eng-Latn"],
|
| 20 |
+
main_score="accuracy",
|
| 21 |
+
date=("2021-11-05", "2022-10-22"),
|
| 22 |
+
form=["written"],
|
| 23 |
+
domains=["Legal"],
|
| 24 |
+
task_subtypes=["Topic classification"],
|
| 25 |
+
license="Not specified",
|
| 26 |
+
socioeconomic_status="high",
|
| 27 |
+
annotations_creators="derived",
|
| 28 |
+
dialect=[],
|
| 29 |
+
text_creation="found",
|
| 30 |
+
bibtex_citation="""@inproceedings{sharma-etal-2019-bigpatent,
|
| 31 |
+
title = "{BIGPATENT}: A Large-Scale Dataset for Abstractive and Coherent Summarization",
|
| 32 |
+
author = "Sharma, Eva and
|
| 33 |
+
Li, Chen and
|
| 34 |
+
Wang, Lu",
|
| 35 |
+
editor = "Korhonen, Anna and
|
| 36 |
+
Traum, David and
|
| 37 |
+
M{\`a}rquez, Llu{\'\i}s",
|
| 38 |
+
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
|
| 39 |
+
month = jul,
|
| 40 |
+
year = "2019",
|
| 41 |
+
address = "Florence, Italy",
|
| 42 |
+
publisher = "Association for Computational Linguistics",
|
| 43 |
+
url = "https://aclanthology.org/P19-1212",
|
| 44 |
+
doi = "10.18653/v1/P19-1212",
|
| 45 |
+
pages = "2204--2213",
|
| 46 |
+
abstract = "Most existing text summarization datasets are compiled from the news domain, where summaries have a flattened discourse structure. In such datasets, summary-worthy content often appears in the beginning of input articles. Moreover, large segments from input articles are present verbatim in their respective summaries. These issues impede the learning and evaluation of systems that can understand an article{'}s global content structure as well as produce abstractive summaries with high compression ratio. In this work, we present a novel dataset, BIGPATENT, consisting of 1.3 million records of U.S. patent documents along with human written abstractive summaries. Compared to existing summarization datasets, BIGPATENT has the following properties: i) summaries contain a richer discourse structure with more recurring entities, ii) salient content is evenly distributed in the input, and iii) lesser and shorter extractive fragments are present in the summaries. Finally, we train and evaluate baselines and popular learning models on BIGPATENT to shed light on new challenges and motivate future directions for summarization research.",
|
| 47 |
+
}""",
|
| 48 |
+
n_samples={"test": 5000},
|
| 49 |
+
avg_character_length={"test": 18620.44},
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
def dataset_transform(self):
|
| 53 |
+
self.dataset = self.stratified_subsampling(
|
| 54 |
+
self.dataset, seed=self.seed, splits=["test"]
|
| 55 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicChatClassification.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
_EVAL_SPLITS = ["test"]
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ToxicChatClassification(AbsTaskClassification):
|
| 10 |
+
metadata = TaskMetadata(
|
| 11 |
+
name="ToxicChatClassification",
|
| 12 |
+
description="""This dataset contains toxicity annotations on 10K user
|
| 13 |
+
prompts collected from the Vicuna online demo. We utilize a human-AI
|
| 14 |
+
collaborative annotation framework to guarantee the quality of annotation
|
| 15 |
+
while maintaining a feasible annotation workload. The details of data
|
| 16 |
+
collection, pre-processing, and annotation can be found in our paper.
|
| 17 |
+
We believe that ToxicChat can be a valuable resource to drive further
|
| 18 |
+
advancements toward building a safe and healthy environment for user-AI
|
| 19 |
+
interactions.
|
| 20 |
+
Only human annotated samples are selected here.""",
|
| 21 |
+
reference="https://aclanthology.org/2023.findings-emnlp.311/",
|
| 22 |
+
dataset={
|
| 23 |
+
"path": "lmsys/toxic-chat",
|
| 24 |
+
"name": "toxicchat0124",
|
| 25 |
+
"revision": "3e0319203c7162b9c9f8015b594441f979c199bc",
|
| 26 |
+
},
|
| 27 |
+
type="Classification",
|
| 28 |
+
category="s2s",
|
| 29 |
+
eval_splits=_EVAL_SPLITS,
|
| 30 |
+
eval_langs=["eng-Latn"],
|
| 31 |
+
main_score="accuracy",
|
| 32 |
+
date=("2023-10-26", "2024-01-31"),
|
| 33 |
+
form=["written"],
|
| 34 |
+
domains=["Constructed"],
|
| 35 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 36 |
+
license="cc-by-4.0",
|
| 37 |
+
socioeconomic_status="high",
|
| 38 |
+
annotations_creators="expert-annotated",
|
| 39 |
+
dialect=[],
|
| 40 |
+
text_creation="found",
|
| 41 |
+
bibtex_citation="""@misc{lin2023toxicchat,
|
| 42 |
+
title={ToxicChat: Unveiling Hidden Challenges of Toxicity Detection in Real-World User-AI Conversation},
|
| 43 |
+
author={Zi Lin and Zihan Wang and Yongqi Tong and Yangkun Wang and Yuxin Guo and Yujia Wang and Jingbo Shang},
|
| 44 |
+
year={2023},
|
| 45 |
+
eprint={2310.17389},
|
| 46 |
+
archivePrefix={arXiv},
|
| 47 |
+
primaryClass={cs.CL}
|
| 48 |
+
}""",
|
| 49 |
+
n_samples={"test": 1427},
|
| 50 |
+
avg_character_length={"test": 189.4},
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
def dataset_transform(self):
|
| 54 |
+
keep_cols = ["user_input", "toxicity"]
|
| 55 |
+
rename_dict = dict(zip(keep_cols, ["text", "label"]))
|
| 56 |
+
remove_cols = [
|
| 57 |
+
col
|
| 58 |
+
for col in self.dataset[_EVAL_SPLITS[0]].column_names
|
| 59 |
+
if col not in keep_cols
|
| 60 |
+
]
|
| 61 |
+
self.dataset = self.dataset.rename_columns(rename_dict)
|
| 62 |
+
self.dataset = self.stratified_subsampling(
|
| 63 |
+
self.dataset, seed=self.seed, splits=["test"]
|
| 64 |
+
)
|
| 65 |
+
# only use human-annotated data
|
| 66 |
+
self.dataset = self.dataset.filter(lambda x: x["human_annotation"])
|
| 67 |
+
self.dataset = self.dataset.remove_columns(remove_cols)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/ToxicConversationsClassification.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ToxicConversationsClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="ToxicConversationsClassification",
|
| 11 |
+
description="Collection of comments from the Civil Comments platform together with annotations if the comment is toxic or not.",
|
| 12 |
+
reference="https://www.kaggle.com/competitions/jigsaw-unintended-bias-in-toxicity-classification/overview",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "mteb/toxic_conversations_50k",
|
| 15 |
+
"revision": "edfaf9da55d3dd50d43143d90c1ac476895ae6de",
|
| 16 |
+
},
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=(
|
| 23 |
+
"2017-01-01",
|
| 24 |
+
"2018-12-31",
|
| 25 |
+
), # Estimated range for the collection of comments
|
| 26 |
+
form=["written"],
|
| 27 |
+
domains=["Social"],
|
| 28 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 29 |
+
license="CC BY 4.0",
|
| 30 |
+
socioeconomic_status="mixed",
|
| 31 |
+
annotations_creators="human-annotated",
|
| 32 |
+
dialect=[],
|
| 33 |
+
text_creation="found",
|
| 34 |
+
bibtex_citation="""@misc{jigsaw-unintended-bias-in-toxicity-classification,
|
| 35 |
+
author = {cjadams, Daniel Borkan, inversion, Jeffrey Sorensen, Lucas Dixon, Lucy Vasserman, nithum},
|
| 36 |
+
title = {Jigsaw Unintended Bias in Toxicity Classification},
|
| 37 |
+
publisher = {Kaggle},
|
| 38 |
+
year = {2019},
|
| 39 |
+
url = {https://kaggle.com/competitions/jigsaw-unintended-bias-in-toxicity-classification}
|
| 40 |
+
}""",
|
| 41 |
+
n_samples={"test": 50000},
|
| 42 |
+
avg_character_length={"test": 296.6},
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
@property
|
| 46 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 47 |
+
metadata_dict = super().metadata_dict
|
| 48 |
+
metadata_dict["n_experiments"] = 10
|
| 49 |
+
metadata_dict["samples_per_label"] = 16
|
| 50 |
+
return metadata_dict
|
| 51 |
+
|
| 52 |
+
def dataset_transform(self):
|
| 53 |
+
self.dataset = self.stratified_subsampling(
|
| 54 |
+
self.dataset, seed=self.seed, splits=["test"]
|
| 55 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetSentimentExtractionClassification.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 4 |
+
|
| 5 |
+
from ....abstasks import AbsTaskClassification
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class TweetSentimentExtractionClassification(AbsTaskClassification):
|
| 9 |
+
metadata = TaskMetadata(
|
| 10 |
+
name="TweetSentimentExtractionClassification",
|
| 11 |
+
description="",
|
| 12 |
+
reference="https://www.kaggle.com/competitions/tweet-sentiment-extraction/overview",
|
| 13 |
+
dataset={
|
| 14 |
+
"path": "mteb/tweet_sentiment_extraction",
|
| 15 |
+
"revision": "d604517c81ca91fe16a244d1248fc021f9ecee7a",
|
| 16 |
+
},
|
| 17 |
+
type="Classification",
|
| 18 |
+
category="s2s",
|
| 19 |
+
eval_splits=["test"],
|
| 20 |
+
eval_langs=["eng-Latn"],
|
| 21 |
+
main_score="accuracy",
|
| 22 |
+
date=(
|
| 23 |
+
"2020-01-01",
|
| 24 |
+
"2020-12-31",
|
| 25 |
+
), # Estimated range for the collection of tweets
|
| 26 |
+
form=["written"],
|
| 27 |
+
domains=["Social"],
|
| 28 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 29 |
+
license="Not specified",
|
| 30 |
+
socioeconomic_status="mixed",
|
| 31 |
+
annotations_creators="human-annotated",
|
| 32 |
+
dialect=[],
|
| 33 |
+
text_creation="found",
|
| 34 |
+
bibtex_citation="""@misc{tweet-sentiment-extraction,
|
| 35 |
+
author = {Maggie, Phil Culliton, Wei Chen},
|
| 36 |
+
title = {Tweet Sentiment Extraction},
|
| 37 |
+
publisher = {Kaggle},
|
| 38 |
+
year = {2020},
|
| 39 |
+
url = {https://kaggle.com/competitions/tweet-sentiment-extraction}
|
| 40 |
+
}""",
|
| 41 |
+
n_samples={"test": 3534},
|
| 42 |
+
avg_character_length={"test": 67.8},
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
@property
|
| 46 |
+
def metadata_dict(self) -> dict[str, str]:
|
| 47 |
+
metadata_dict = dict(self.metadata)
|
| 48 |
+
metadata_dict["n_experiments"] = 10
|
| 49 |
+
metadata_dict["samples_per_label"] = 32
|
| 50 |
+
return metadata_dict
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/eng/TweetTopicSingleClassification.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TweetTopicSingleClassification(AbsTaskClassification):
|
| 8 |
+
metadata = TaskMetadata(
|
| 9 |
+
name="TweetTopicSingleClassification",
|
| 10 |
+
description="""Topic classification dataset on Twitter with 6 labels. Each instance of
|
| 11 |
+
TweetTopic comes with a timestamp which distributes from September 2019 to August 2021.
|
| 12 |
+
Tweets were preprocessed before the annotation to normalize some artifacts, converting
|
| 13 |
+
URLs into a special token {{URL}} and non-verified usernames into {{USERNAME}}. For verified
|
| 14 |
+
usernames, we replace its display name (or account name) with symbols {@}.
|
| 15 |
+
""",
|
| 16 |
+
dataset={
|
| 17 |
+
"path": "cardiffnlp/tweet_topic_single",
|
| 18 |
+
"revision": "87b7a0d1c402dbb481db649569c556d9aa27ac05",
|
| 19 |
+
},
|
| 20 |
+
reference="https://arxiv.org/abs/2209.09824",
|
| 21 |
+
type="Classification",
|
| 22 |
+
category="s2s",
|
| 23 |
+
eval_splits=["test_2021"],
|
| 24 |
+
eval_langs=["eng-Latn"],
|
| 25 |
+
main_score="accuracy",
|
| 26 |
+
date=("2019-09-01", "2021-08-31"),
|
| 27 |
+
form=["written"],
|
| 28 |
+
domains=["Social", "News"],
|
| 29 |
+
task_subtypes=["Topic classification"],
|
| 30 |
+
license="Other",
|
| 31 |
+
socioeconomic_status="medium",
|
| 32 |
+
annotations_creators="expert-annotated",
|
| 33 |
+
dialect=[],
|
| 34 |
+
text_creation="found",
|
| 35 |
+
bibtex_citation="""
|
| 36 |
+
@inproceedings{dimosthenis-etal-2022-twitter,
|
| 37 |
+
title = "{T}witter {T}opic {C}lassification",
|
| 38 |
+
author = "Antypas, Dimosthenis and
|
| 39 |
+
Ushio, Asahi and
|
| 40 |
+
Camacho-Collados, Jose and
|
| 41 |
+
Neves, Leonardo and
|
| 42 |
+
Silva, Vitor and
|
| 43 |
+
Barbieri, Francesco",
|
| 44 |
+
booktitle = "Proceedings of the 29th International Conference on Computational Linguistics",
|
| 45 |
+
month = oct,
|
| 46 |
+
year = "2022",
|
| 47 |
+
address = "Gyeongju, Republic of Korea",
|
| 48 |
+
publisher = "International Committee on Computational Linguistics"
|
| 49 |
+
}
|
| 50 |
+
""",
|
| 51 |
+
n_samples={"test_2021": 1693},
|
| 52 |
+
avg_character_length={"test_2021": 167.66},
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
def dataset_transform(self):
|
| 56 |
+
self.dataset["train"] = self.dataset["train_2021"]
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fas/__init__.py
ADDED
|
File without changes
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoHateSpeechClassification.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from mteb.abstasks import AbsTaskClassification
|
| 4 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 5 |
+
|
| 6 |
+
TEST_SAMPLES = 2048
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class FilipinoHateSpeechClassification(AbsTaskClassification):
|
| 10 |
+
metadata = TaskMetadata(
|
| 11 |
+
name="FilipinoHateSpeechClassification",
|
| 12 |
+
description="Filipino Twitter dataset for sentiment classification.",
|
| 13 |
+
reference="https://pcj.csp.org.ph/index.php/pcj/issue/download/29/PCJ%20V14%20N1%20pp1-14%202019",
|
| 14 |
+
dataset={
|
| 15 |
+
"path": "hate_speech_filipino",
|
| 16 |
+
"revision": "1994e9bb7f3ec07518e3f0d9e870cb293e234686",
|
| 17 |
+
},
|
| 18 |
+
type="Classification",
|
| 19 |
+
category="s2s",
|
| 20 |
+
date=("2019-08-01", "2019-08-01"),
|
| 21 |
+
eval_splits=["validation", "test"],
|
| 22 |
+
eval_langs=["fil-Latn"],
|
| 23 |
+
main_score="accuracy",
|
| 24 |
+
form=["written"],
|
| 25 |
+
domains=["Social"],
|
| 26 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 27 |
+
license="Not specified",
|
| 28 |
+
socioeconomic_status="mixed",
|
| 29 |
+
annotations_creators="human-annotated",
|
| 30 |
+
dialect=[],
|
| 31 |
+
text_creation="found",
|
| 32 |
+
bibtex_citation="""
|
| 33 |
+
@article{Cabasag-2019-hate-speech,
|
| 34 |
+
title={Hate speech in Philippine election-related tweets: Automatic detection and classification using natural language processing.},
|
| 35 |
+
author={Neil Vicente Cabasag, Vicente Raphael Chan, Sean Christian Lim, Mark Edward Gonzales, and Charibeth Cheng},
|
| 36 |
+
journal={Philippine Computing Journal},
|
| 37 |
+
volume={XIV},
|
| 38 |
+
number={1},
|
| 39 |
+
month={August},
|
| 40 |
+
year={2019}
|
| 41 |
+
}
|
| 42 |
+
""",
|
| 43 |
+
n_samples={"validation": TEST_SAMPLES, "test": TEST_SAMPLES},
|
| 44 |
+
avg_character_length={"validation": 88.1, "test": 87.4},
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
def dataset_transform(self):
|
| 48 |
+
self.dataset = self.stratified_subsampling(
|
| 49 |
+
self.dataset, seed=self.seed, splits=["validation", "test"]
|
| 50 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/fil/FilipinoShopeeReviewsClassification.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
|
| 2 |
+
from mteb.abstasks.TaskMetadata import TaskMetadata
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class FilipinoShopeeReviewsClassification(AbsTaskClassification):
|
| 6 |
+
metadata = TaskMetadata(
|
| 7 |
+
name="FilipinoShopeeReviewsClassification",
|
| 8 |
+
description="The Shopee reviews tl 15 dataset is constructed by randomly taking 2100 training samples and 450 samples for testing and validation for each review star from 1 to 5. In total, there are 10500 training samples and 2250 each in validation and testing samples.",
|
| 9 |
+
reference="https://uijrt.com/articles/v4/i8/UIJRTV4I80009.pdf",
|
| 10 |
+
dataset={
|
| 11 |
+
"path": "scaredmeow/shopee-reviews-tl-stars",
|
| 12 |
+
"revision": "d096f402fdc76886458c0cfb5dedc829bea2b935",
|
| 13 |
+
},
|
| 14 |
+
type="Classification",
|
| 15 |
+
task_subtypes=["Sentiment/Hate speech"],
|
| 16 |
+
category="s2s",
|
| 17 |
+
eval_splits=["validation", "test"],
|
| 18 |
+
eval_langs=["fil-Latn"],
|
| 19 |
+
form=["written"],
|
| 20 |
+
domains=["Social"],
|
| 21 |
+
license="MPL-2.0",
|
| 22 |
+
socioeconomic_status="mixed",
|
| 23 |
+
annotations_creators="human-annotated",
|
| 24 |
+
dialect=[],
|
| 25 |
+
text_creation="found",
|
| 26 |
+
date=("2022-05-13", "2023-05-13"),
|
| 27 |
+
main_score="accuracy",
|
| 28 |
+
bibtex_citation="""
|
| 29 |
+
@article{riegoenhancement,
|
| 30 |
+
title={Enhancement to Low-Resource Text Classification via Sequential Transfer Learning},
|
| 31 |
+
author={Riego, Neil Christian R. and Villarba, Danny Bell and Sison, Ariel Antwaun Rolando C. and Pineda, Fernandez C. and Lagunzad, Herminiño C.}
|
| 32 |
+
journal={United International Journal for Research & Technology},
|
| 33 |
+
volume={04},
|
| 34 |
+
issue={08},
|
| 35 |
+
pages={72--82}
|
| 36 |
+
}""",
|
| 37 |
+
n_samples={"validation": 2250, "test": 2250},
|
| 38 |
+
avg_character_length={"validation": 143.8, "test": 145.1},
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
def dataset_transform(self):
|
| 42 |
+
self.dataset = self.stratified_subsampling(
|
| 43 |
+
self.dataset, seed=self.seed, splits=["validation", "test"]
|
| 44 |
+
)
|
testbed/embeddings-benchmark__mteb/mteb/tasks/Classification/por/__init__.py
ADDED
|
File without changes
|