Upload dataset_2.py
dataset_2.py  (ADDED, +148 -0)
# coding=utf-8
# Copyright 2022 The PolyAI and HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Self Dataset."""

import csv
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{gerz2021multilingual,
  title={Multilingual and cross-lingual intent detection from spoken data},
  author={Gerz, Daniela and Su, Pei-Hao and Kusztos, Razvan and Mondal, Avishek and Lis, Michal and Singhal, Eshan and Mrk{\v{s}}i{\'c}, Nikola and Wen, Tsung-Hsien and Vuli{\'c}, Ivan},
  journal={arXiv preprint arXiv:2104.08524},
  year={2021}
}
"""

_DESCRIPTION = """\
Self Dataset is a training and evaluation resource for the intent detection
task with spoken data. It covers 14 intents extracted from a commercial system
in the e-banking domain, associated with spoken examples in 14 diverse
language varieties.
"""

_ALL_CONFIGS = sorted([
    "cs-CZ", "de-DE", "en-AU", "en-GB", "en-US", "es-ES", "fr-FR",
    "it-IT", "ko-KR", "nl-NL", "pl-PL", "pt-PT", "ru-RU", "zh-CN",
])

_HOMEPAGE_URL = "https://arxiv.org/abs/2104.08524"

_DATA_URL = "https://www.dropbox.com/scl/fi/wr608qdxo5jnm0azwkbak/dataset.zip?rlkey=wktncggwoka8qdm46cjb41yac&dl=0"


class ATCConfig(datasets.BuilderConfig):
    """BuilderConfig for the Self Dataset."""

    def __init__(self, name, description, homepage, data_url):
        super(ATCConfig, self).__init__(
            name=name,
            version=datasets.Version("1.0.0", ""),
            description=description,
        )
        self.homepage = homepage
        self.data_url = data_url


def _build_config(name):
    return ATCConfig(
        name=name,
        description=_DESCRIPTION,
        homepage=_HOMEPAGE_URL,
        data_url=_DATA_URL,
    )


class ATC(datasets.GeneratorBasedBuilder):
    """Self Dataset builder: one config per language variety, plus "all"."""

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS + ["all"]]

    def _info(self):
        features = datasets.Features(
            {
                "file_path": datasets.Value("string"),
                "reference": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("file_path", "reference"),
            homepage=self.config.homepage,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        langs = _ALL_CONFIGS if self.config.name == "all" else [self.config.name]

        archive_path = dl_manager.download_and_extract(self.config.data_url)
        audio_path = dl_manager.extract(
            os.path.join(archive_path, "dataset", "audio.zip")
        )
        text_path = dl_manager.extract(
            os.path.join(archive_path, "dataset", "text.zip")
        )

        # One transcription CSV per requested language variety.
        text_paths = {lang: os.path.join(text_path, f"{lang}.csv") for lang in langs}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_path": audio_path,
                    "text_paths": text_paths,
                },
            )
        ]

    def _generate_examples(self, audio_path, text_paths):
        key = 0
        for lang, text_path in text_paths.items():
            with open(text_path, encoding="utf-8") as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
                next(csv_reader)  # skip the header row
                for row in csv_reader:
                    file_path, reference = row
                    # The CSV stores POSIX-style relative paths; resolve them
                    # against the extracted audio archive.
                    file_path = os.path.join(audio_path, *file_path.split("/"))
                    yield key, {
                        "file_path": file_path,
                        "reference": reference,
                    }
                    key += 1
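
For reference, a minimal usage sketch (not part of the uploaded file). It assumes the script above is saved locally as dataset_2.py, a datasets version that still supports local loading scripts, and that each per-language CSV inside text.zip has a header row followed by file_path,reference pairs, which is the layout _generate_examples expects:

# Minimal usage sketch, assuming the script above is saved locally as
# dataset_2.py and the installed datasets version still supports local
# loading scripts. "en-US" is one of the configs in _ALL_CONFIGS;
# passing "all" yields every language variety in a single train split.
from datasets import load_dataset

ds = load_dataset("dataset_2.py", "en-US", split="train")

# Each example matches the features declared in _info(); the values
# shown here are illustrative, not taken from the actual archive:
#   {"file_path": ".../en-US/clip_0001.wav",
#    "reference": "i would like to check my balance"}
print(ds[0])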