gimmeursocks committed on
Commit
f73bba4
·
verified ·
1 Parent(s): a20a14e

Upload NeuLab-TedTalks.py

Browse files
Files changed (1) hide show
  1. NeuLab-TedTalks.py +102 -0
NeuLab-TedTalks.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2025 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ import os
18
+
19
+ import datasets
20
+
21
+
22
+ _DESCRIPTION = """\
23
+ This dataset is compiled from TED talk subtitles and distributed through phontron.com. The package here includes the training data only (development and test data are not included in this package). The transcripts have been translated by a global community of volunteers to more than 100 languages. The parallel corpus and the code fopr cerating it is available from https://www.ted.com/participate/translate
24
+ Note that this corpus is tokenized in its original form. For this package we applied automatic de-tokenization using the moses tools. This is not perfect and may miss some de-tokenization steps.
25
+
26
+ 59 languages, 1711 bitexts
27
+ total number of files: 59949
28
+ total number of tokens: 108.23M
29
+ total number of sentence fragments: 6.45M
30
+ """
31
+ _HOMEPAGE_URL = "https://opus.nlpl.eu/NeuLab-TedTalks.php"
32
+ _CITATION = """\
33
+ J. Tiedemann, 2012, Parallel Data, Tools and Interfaces in OPUS. In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC 2012)
34
+ """
35
+
36
+ _VERSION = "1.0.0"
37
+ _BASE_NAME = "NeuLab-TedTalks.{}.{}"
38
+
39
+ _LANGUAGE_PAIRS = {
40
+ ("ar", "en"): "https://object.pouta.csc.fi/OPUS-NeuLab-TedTalks/v1/moses/ar-en.txt.zip",
41
+ }
42
+
43
+
44
class NeuLabTedTalksConfig(datasets.BuilderConfig):
    """BuilderConfig for a NeuLab-TedTalks language pair.

    Adds no options beyond the base ``datasets.BuilderConfig``; it exists
    so the builder can declare its own dedicated config class.
    """

    def __init__(self, *args, **kwargs):
        # Plain passthrough to the base config constructor.
        super().__init__(*args, **kwargs)
47
+
48
+
49
class NeuLabTedTalks(datasets.GeneratorBasedBuilder):
    """Builder for the OPUS NeuLab-TedTalks parallel corpus.

    One config per language pair in ``_LANGUAGE_PAIRS``; each example is a
    positionally aligned translation pair read from the two moses-format
    text files inside the downloaded archive.
    """

    BUILDER_CONFIGS = [
        NeuLabTedTalksConfig(
            name=f"{lang1}-{lang2}",
            description=f"Translating {lang1} to {lang2} or vice versa",
            version=datasets.Version(_VERSION),
        )
        for lang1, lang2 in _LANGUAGE_PAIRS.keys()
    ]
    BUILDER_CONFIG_CLASS = NeuLabTedTalksConfig

    def _info(self):
        """Return dataset metadata: an ``id`` plus a two-language ``translation`` feature."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    # Config name is "<l1>-<l2>", so splitting yields the pair.
                    "translation": datasets.Translation(languages=tuple(self.config.name.split("-"))),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive for this config's language pair.

        Returns a single TRAIN split (the corpus ships training data only).

        Raises:
            ValueError: if the config name is not a key of ``_LANGUAGE_PAIRS``.
                Previously a ``None`` URL was silently passed to the download
                manager, which failed with an opaque error.
        """
        pair = tuple(self.config.name.split("-"))
        download_url = _LANGUAGE_PAIRS.get(pair)
        if download_url is None:
            # Fail fast with a clear message instead of letting
            # dl_manager choke on a None URL.
            raise ValueError(
                f"Unsupported language pair: {self.config.name!r}. "
                f"Available pairs: {sorted(_LANGUAGE_PAIRS)}"
            )
        path = dl_manager.download_and_extract(download_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": path},
            )
        ]

    def _generate_examples(self, datapath):
        """Yield ``(key, example)`` pairs of aligned sentences.

        Args:
            datapath: directory containing the two extracted per-language
                files named per ``_BASE_NAME``.

        Lines are paired positionally via ``zip``, so iteration stops at the
        shorter file if the two sides ever differ in length.
        """
        l1, l2 = self.config.name.split("-")
        l1_path = os.path.join(datapath, _BASE_NAME.format(self.config.name, l1))
        l2_path = os.path.join(datapath, _BASE_NAME.format(self.config.name, l2))

        with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "translation": {l1: x.strip(), l2: y.strip()},
                }