Datasets:

ArXiv:
License:
Georgios Spithourakis committed on
Commit
b036932
·
1 Parent(s): ac5db73

Create new file

Browse files
Files changed (1) hide show
  1. evi.py +179 -0
evi.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The PolyAI and HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import csv
17
+ import json
18
+ import os
19
+
20
+ import datasets
21
+
22
# Module-level logger, namespaced to this dataset script.
logger = datasets.logging.get_logger(__name__)


""" EVI Dataset"""

# BibTeX citation for the EVI paper (Spithourakis et al., 2022).
_CITATION = """\
@inproceedings{Spithourakis2022evi,
author = {Georgios P. Spithourakis and Ivan Vuli\'{c} and Micha\l{} Lis and I\~{n}igo Casanueva and Pawe\l{} Budzianowski},
title = {{EVI}: Multilingual Spoken Dialogue Tasks and Dataset for Knowledge-Based Enrolment, Verification, and Identification},
year = {2022},
note = {Data available at https://github.com/PolyAI-LDN/evi-paper},
url = {https://arxiv.org/abs/2204.13496},
booktitle = {Findings of NAACL (publication pending)}
}
""" # noqa

# Locale configurations available for this dataset; one BuilderConfig each.
_ALL_CONFIGS = sorted([
    "en-GB", "fr-FR", "pl-PL"
])

# Short description used in the dataset card / DatasetInfo.
_DESCRIPTION = "EVI is a dataset for enrolment, identification, and verification" # noqa

_HOMEPAGE_URL = "https://arxiv.org/abs/2204.13496"

# Public S3 archive containing the audio recordings for all locales.
_AUDIO_DATA_URL = "https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip" # noqa

_VERSION = datasets.Version("0.0.4", "")
49
+
50
+
51
class EviConfig(datasets.BuilderConfig):
    """BuilderConfig for EVI.

    Args:
        name: Locale name of the config (e.g. "en-GB").
        version: Dataset version.
        description: Short human-readable description.
        homepage: URL of the dataset homepage.
        audio_data_url: URL of the zip archive with the audio files.
    """

    def __init__(
        self, name, version, description, homepage, audio_data_url
    ):
        # BUG FIX: the original passed `name=self.name` and
        # `description=self.description` to the parent constructor, reading
        # the attributes *before* they were assigned — they resolved to
        # BuilderConfig's class-level defaults, not the arguments, and were
        # only patched afterwards by redundant reassignments. Pass the
        # constructor arguments directly; super().__init__ stores them.
        super().__init__(
            name=name,
            version=version,
            description=description,
        )
        self.homepage = homepage
        self.audio_data_url = audio_data_url
66
+
67
+
68
def _build_config(name):
    """Build the EviConfig for a single locale (e.g. "en-GB")."""
    shared_kwargs = dict(
        version=_VERSION,
        description=_DESCRIPTION,
        homepage=_HOMEPAGE_URL,
        audio_data_url=_AUDIO_DATA_URL,
    )
    return EviConfig(name=name, **shared_kwargs)
76
+
77
+
78
class Evi(datasets.GeneratorBasedBuilder):
    """Dataset builder for EVI: multilingual spoken dialogues for
    knowledge-based enrolment, verification, and identification.

    One config per locale (see ``_ALL_CONFIGS``); only a TEST split is
    produced.
    """

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS]

    def _info(self):
        """Return the DatasetInfo describing one example's features."""
        task_templates = None
        langs = _ALL_CONFIGS
        features = datasets.Features(
            {
                "lang_id": datasets.ClassLabel(names=langs),
                "dialogue_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "turn_id": datasets.Value("int32"),
                # Profile the caller is enrolled/verified/identified against.
                "target_profile_id": datasets.Value("string"),
                # 1-best ASR transcription plus the full n-best list.
                "asr_transcription": datasets.Value("string"),
                "asr_nbest": datasets.Sequence(datasets.Value("string")),
                # Local wav path (None when the audio file is missing).
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=8_000),
            }
        )

        return datasets.DatasetInfo(
            version=self.config.version,
            description=self.config.description,
            homepage=self.config.homepage,
            license="CC-BY-4.0",
            citation=_CITATION,
            features=features,
            supervised_keys=None,
            task_templates=task_templates,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the audio archive and map the selected language
        to its transcription CSV and audio directory.

        Returns a single TEST SplitGenerator.
        """
        # FIX: dropped the redundant parentheses in `([self.config.name])`.
        langs = [self.config.name]

        audio_path = dl_manager.download_and_extract(
            self.config.audio_data_url
        )
        # Dialogue CSVs are expected relative to the working directory,
        # named e.g. "dialogues.en.csv" (language part of the locale only).
        text_path = ""
        lang2text_path = {
            _lang: os.path.join(
                text_path,
                f"dialogues.{_lang.split('-')[0]}.csv"
            )
            for _lang in langs
        }
        lang2audio_path = {
            _lang: os.path.join(
                audio_path,
                f"{_lang.split('-')[0]}"
            )
            for _lang in langs
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_paths": lang2audio_path,
                    "text_paths": lang2text_path,
                },
            )
        ]

    def _generate_examples(self, audio_paths, text_paths):
        """Yield ``(key, example)`` pairs.

        Args:
            audio_paths: Mapping of locale -> directory with per-dialogue
                audio subfolders (one ``<turn_num>.wav`` per turn).
            text_paths: Mapping of locale -> tab-separated dialogue CSV.
        """
        key = 0
        for lang, text_path in text_paths.items():
            audio_path = audio_paths[lang]
            with open(text_path, encoding="utf-8") as fin:
                reader = csv.DictReader(
                    fin, delimiter="\t", skipinitialspace=True
                )
                for dictrow in reader:
                    dialogue_id = dictrow["dialogue_id"]
                    turn_id = dictrow["turn_num"]
                    file_path = os.path.join(
                        audio_path,
                        dialogue_id,
                        f'{turn_id}.wav'
                    )
                    # Some turns have no recording; keep the row with a
                    # null path/audio rather than dropping it.
                    if not os.path.isfile(file_path):
                        file_path = None
                    example = {
                        "lang_id": _ALL_CONFIGS.index(lang),
                        "dialogue_id": dialogue_id,
                        "speaker_id": dictrow["speaker_id"],
                        "turn_id": turn_id,
                        "target_profile_id": dictrow["scenario_id"],
                        "asr_transcription": dictrow["transcription"],
                        "asr_nbest": json.loads(dictrow["nbest"]),
                        "path": file_path,
                        "audio": file_path,
                    }
                    # FIX: removed leftover debug `print(example)` that wrote
                    # every example to stdout during generation; use the
                    # module-level `logger` for diagnostics instead.
                    yield key, example
                    key += 1