import os

import datasets


_DESCRIPTION = """\
This dataset is compiled from TED talk subtitles and distributed through phontron.com. The package here includes the training data only (development and test data are not included in this package). The transcripts have been translated by a global community of volunteers into more than 100 languages. The parallel corpus and the code for creating it are available from https://www.ted.com/participate/translate

Note that this corpus is tokenized in its original form. For this package we applied automatic de-tokenization using the Moses tools. This is not perfect and may miss some de-tokenization steps.

59 languages, 1711 bitexts
total number of files: 59949
total number of tokens: 108.23M
total number of sentence fragments: 6.45M
"""

_HOMEPAGE_URL = "https://opus.nlpl.eu/NeuLab-TedTalks.php"

_CITATION = """\
J. Tiedemann, 2012, Parallel Data, Tools and Interfaces in OPUS. In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC 2012)
"""

_VERSION = "1.0.0"

# Template for the per-language file names inside the extracted archive,
# e.g. "NeuLab-TedTalks.ar-en.ar".
_BASE_NAME = "NeuLab-TedTalks.{}.{}"

_LANGUAGE_PAIRS = {
    ("ar", "en"): "https://object.pouta.csc.fi/OPUS-NeuLab-TedTalks/v1/moses/ar-en.txt.zip",
}
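
# Only the ar-en pair is listed above, although the description mentions 59
# languages and 1711 bitexts. Judging from the single entry, the archives
# appear to follow the pattern
#   https://object.pouta.csc.fi/OPUS-NeuLab-TedTalks/v1/moses/{src}-{tgt}.txt.zip
# but any pair added here should be verified against the OPUS download page.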


class NeuLabTedTalksConfig(datasets.BuilderConfig):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


class NeuLabTedTalks(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        NeuLabTedTalksConfig(
            name=f"{lang1}-{lang2}",
            description=f"Translating {lang1} to {lang2} or vice versa",
            version=datasets.Version(_VERSION),
        )
        for lang1, lang2 in _LANGUAGE_PAIRS.keys()
    ]
    BUILDER_CONFIG_CLASS = NeuLabTedTalksConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "translation": datasets.Translation(languages=tuple(self.config.name.split("-"))),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )
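
    # For a given config (e.g. "ar-en"), each example has the shape
    # (values illustrative, not real data):
    #     {"id": "0", "translation": {"ar": "...", "en": "..."}}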

    def _split_generators(self, dl_manager):
        download_url = _LANGUAGE_PAIRS.get(tuple(self.config.name.split("-")))
        path = dl_manager.download_and_extract(download_url)
        # Only a train split is produced; per the description, development and
        # test data are not included in this package.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": path},
            )
        ]

    def _generate_examples(self, datapath):
        l1, l2 = self.config.name.split("-")
        # Build the per-language file paths inside the extracted archive.
        l1_file = _BASE_NAME.format(self.config.name, l1)
        l2_file = _BASE_NAME.format(self.config.name, l2)
        l1_path = os.path.join(datapath, l1_file)
        l2_path = os.path.join(datapath, l2_file)

        # The two files are parallel line by line, so zipping them pairs each
        # source sentence with its translation.
        with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
                x = x.strip()
                y = y.strip()
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "translation": {l1: x, l2: y},
                }
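

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original script: it assumes a
    # `datasets` version that still supports script-based loading (pre-3.0)
    # and a network connection to fetch the ar-en archive.
    dataset = datasets.load_dataset(__file__, "ar-en")
    print(dataset["train"][0])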