dralois committed on
Commit
c1c9dcb
·
verified ·
1 Parent(s): 77770cd

Delete loading script

Browse files
Files changed (1) hide show
  1. common_language.py +0 -168
common_language.py DELETED
@@ -1,168 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ Common Language Dataset"""
16
-
17
- import os
18
-
19
- import datasets
20
-
21
-
22
# Relative path of the audio archive inside the dataset repository;
# resolved and extracted by `dl_manager.download_and_extract` in
# `_split_generators`.
_DATA_URL = "data/CommonLanguage.zip"

# BibTeX citation for the Zenodo release of CommonLanguage.
_CITATION = """\
@dataset{ganesh_sinisetty_2021_5036977,
author = {Ganesh Sinisetty and
Pavlo Ruban and
Oleksandr Dymov and
Mirco Ravanelli},
title = {CommonLanguage},
month = jun,
year = 2021,
publisher = {Zenodo},
version = {0.1},
doi = {10.5281/zenodo.5036977},
url = {https://doi.org/10.5281/zenodo.5036977}
}
"""

# Human-readable summary shown on the dataset card / in `DatasetInfo`.
_DESCRIPTION = """\
This dataset is composed of speech recordings from languages that were carefully selected from the CommonVoice database.
The total duration of audio recordings is 45.1 hours (i.e., 1 hour of material for each language).
The dataset has been extracted from CommonVoice to train language-id systems.
"""

_HOMEPAGE = "https://zenodo.org/record/5036977"

_LICENSE = "https://creativecommons.org/licenses/by/4.0/legalcode"

# Language labels. These strings serve double duty:
#   1. `ClassLabel` names in `_info` — list ORDER fixes the integer label
#      ids, so never reorder or insert entries.
#   2. Directory names inside the extracted archive — `_generate_examples`
#      joins each entry into the CSV/WAV paths.
# NOTE(review): "Mangolian" and "Ukranian" appear misspelled, but they must
# match the archive's directory layout — do not correct without renaming the
# underlying data.
_LANGUAGES = [
    "Arabic",
    "Basque",
    "Breton",
    "Catalan",
    "Chinese_China",
    "Chinese_Hongkong",
    "Chinese_Taiwan",
    "Chuvash",
    "Czech",
    "Dhivehi",
    "Dutch",
    "English",
    "Esperanto",
    "Estonian",
    "French",
    "Frisian",
    "Georgian",
    "German",
    "Greek",
    "Hakha_Chin",
    "Indonesian",
    "Interlingua",
    "Italian",
    "Japanese",
    "Kabyle",
    "Kinyarwanda",
    "Kyrgyz",
    "Latvian",
    "Maltese",
    "Mangolian",
    "Persian",
    "Polish",
    "Portuguese",
    "Romanian",
    "Romansh_Sursilvan",
    "Russian",
    "Sakha",
    "Slovenian",
    "Spanish",
    "Swedish",
    "Tamil",
    "Tatar",
    "Turkish",
    "Ukranian",
    "Welsh",
]
97
-
98
-
99
class CommonLanguage(datasets.GeneratorBasedBuilder):
    """Dataset builder for Common Language.

    Speech recordings for the languages listed in ``_LANGUAGES``, extracted
    from CommonVoice for training language-identification systems. The
    archive contains one directory per language, each holding per-split
    CSV manifests (``train.csv``/``dev.csv``/``test.csv``) and the WAV files.
    """

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", version=VERSION, description="The entire Common Language dataset"),
    ]

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "client_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    # Audio is decoded lazily from the WAV path at 48 kHz.
                    "audio": datasets.Audio(sampling_rate=48_000),
                    "sentence": datasets.Value("string"),
                    "age": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                    # Label ids follow the order of _LANGUAGES.
                    "language": datasets.ClassLabel(names=_LANGUAGES),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the three splits.

        The on-disk split directory for validation is named ``dev``, hence
        the split-name → directory-name pairs below.
        """
        extracted_root = dl_manager.download_and_extract(_DATA_URL)
        data_dir = os.path.join(extracted_root, "common_voice_kpd")

        split_pairs = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"archive_path": data_dir, "split": dir_name},
            )
            for split_name, dir_name in split_pairs
        ]

    def _generate_examples(self, archive_path, split):
        """Yield ``(key, example)`` pairs for one split across all languages.

        Each language directory has a UTF-16, tab-separated ``<split>.csv``
        manifest; the first column is skipped and the remaining five are
        client id, wav filename, sentence, age, and gender.
        """
        example_idx = 0
        for lang in _LANGUAGES:
            manifest_path = os.path.join(archive_path, lang, f"{split}.csv")
            with open(manifest_path, encoding="utf-16") as manifest:
                next(manifest)  # skip the header row
                for row in manifest:
                    # Drop the first column, keep the five fields we need.
                    speaker_id, wav_name, text, age, gender = row.strip().split("\t")[1:]
                    wav_path = os.path.join(archive_path, lang, split, speaker_id, wav_name)
                    yield example_idx, {
                        "client_id": speaker_id,
                        "path": wav_path,
                        "audio": wav_path,
                        "sentence": text,
                        "age": age,
                        "gender": gender,
                        "language": lang,
                    }
                    example_idx += 1