calicxy committed on
Commit
d12622d
·
1 Parent(s): 4686892

adding local loading script

Browse files
Files changed (1) hide show
  1. loadingScript_imda.py +220 -0
loadingScript_imda.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import glob
3
+ import datasets
4
+ import pandas as pd
5
+ from sklearn.model_selection import train_test_split
6
+
7
+ _DESCRIPTION = """\
8
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
9
+ """
10
+
11
+ _CITATION = """\
12
+ """
13
+ _CHANNEL_CONFIGS = sorted([
14
+ "CHANNEL0", "CHANNEL1", "CHANNEL2"
15
+ ])
16
+
17
+ _GENDER_CONFIGS = sorted(["F", "M"])
18
+
19
+ _RACE_CONFIGS = sorted(["CHINESE", "MALAY", "INDIAN", "OTHERS"])
20
+
21
+ _HOMEPAGE = "https://huggingface.co/indonesian-nlp/librivox-indonesia"
22
+
23
+ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
24
+
25
+ _PATH_TO_DATA = r'C:\Users\calic\Downloads\huggingface-dataset\imda-dataset\IMDA - National Speech Corpus\PART1'
26
+ # _PATH_TO_DATA = './PART1/DATA'
27
+
28
class Minds14Config(datasets.BuilderConfig):
    """BuilderConfig for one (channel, gender, race) slice of the corpus.

    The config name is the concatenation of the three values, e.g.
    "CHANNEL0FCHINESE" or "allallall" for the wildcard config.

    NOTE(review): class name is a leftover from the MINDS-14 script this was
    adapted from; kept unchanged because other code constructs it by name.
    """

    def __init__(
        self, channel, gender, race, description, homepage, path_to_data
    ):
        # BUG FIX: the original passed `description=self.description` to
        # super().__init__ — at that point `self.description` has not been
        # assigned yet, so the argument silently resolved to the inherited
        # class default instead of the `description` passed by the caller.
        # Pass the constructor argument directly.
        super(Minds14Config, self).__init__(
            name=channel + gender + race,
            version=datasets.Version("1.0.0", ""),
            description=description,
        )
        self.channel = channel
        self.gender = gender
        self.race = race
        self.description = description
        self.homepage = homepage
        self.path_to_data = path_to_data
45
+
46
+
47
def _build_config(channel, gender, race):
    """Create a Minds14Config for one (channel, gender, race) combination,
    filling the description/homepage/data-path fields from module constants."""
    config_kwargs = dict(
        channel=channel,
        gender=gender,
        race=race,
        description=_DESCRIPTION,
        homepage=_HOMEPAGE,
        path_to_data=_PATH_TO_DATA,
    )
    return Minds14Config(**config_kwargs)
56
+
57
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
58
class NewDataset(datasets.GeneratorBasedBuilder):
    """Loader for PART1 of the IMDA National Speech Corpus from a local copy.

    Audio is read from per-speaker zip archives under DATA/<mic>/WAVE and
    paired with the tab-separated transcript files under DATA/<mic>/SCRIPT.
    Speakers are split 70/30 into train/test, stratified by (gender, race)
    using the "Speaker Information (Part 1).XLSX" spreadsheet.
    """

    VERSION = datasets.Version("1.1.0")

    # One config per channel x gender x race combination, plus an "all"
    # wildcard on each axis (e.g. "allallall", "CHANNEL0FCHINESE").
    BUILDER_CONFIGS = []
    for channel in _CHANNEL_CONFIGS + ["all"]:
        for gender in _GENDER_CONFIGS + ["all"]:
            for race in _RACE_CONFIGS + ["all"]:
                BUILDER_CONFIGS.append(_build_config(channel, gender, race))

    DEFAULT_CONFIG_NAME = "allallall"  # wildcard on every axis

    def _info(self):
        """Describe the features, supervised keys and metadata of the dataset."""
        task_templates = None
        features = datasets.Features(
            {
                "audio": datasets.features.Audio(sampling_rate=16000),
                "transcript": datasets.Value("string"),
                "mic": datasets.Value("string"),
                "audio_name": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "race": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # (input, target) pair used when as_supervised=True.
            supervised_keys=("audio", "transcript"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=task_templates,
        )

    def _split_generators(self, dl_manager):
        """Build train/test splits with a 70/30 speaker-level split,
        stratified by (gender, race) so both splits cover every subgroup."""
        # Expand the "all" wildcard on each config axis.
        mics = (
            _CHANNEL_CONFIGS
            if self.config.channel == "all"
            else [self.config.channel]
        )
        gender = (
            _GENDER_CONFIGS
            if self.config.gender == "all"
            else [self.config.gender]
        )
        race = (
            _RACE_CONFIGS
            if self.config.race == "all"
            else [self.config.race]
        )

        # Split speaker ids per (gender, race) subgroup so the train/test
        # ratio holds within each subgroup, not just overall.
        train_speaker_ids = []
        test_speaker_ids = []
        path_to_speaker = os.path.join(self.config.path_to_data, "DOC", "Speaker Information (Part 1).XLSX")
        # dtype=object keeps ids like "0001" as strings (leading zeros matter).
        speaker_df = pd.read_excel(path_to_speaker, dtype={'SCD/PART1': object})
        for g in gender:
            for r in race:
                X = speaker_df[(speaker_df["ACC"] == r) & (speaker_df["SEX"] == g)]
                X_train, X_test = train_test_split(X, test_size=0.3, random_state=42, shuffle=True)
                train_speaker_ids.extend(X_train["SCD/PART1"])
                test_speaker_ids.extend(X_test["SCD/PART1"])

        # BUG FIX: the original left debugging leftovers here, hard-wiring the
        # splits to speakers ["0001"] / ["0003"] and discarding the computed
        # train/test speaker lists. Use the real splits.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "path_to_data": self.config.path_to_data,
                    "speaker_metadata": speaker_df,
                    "speaker_ids": train_speaker_ids,
                    "mics": mics,
                    "dl_manager": dl_manager,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "path_to_data": self.config.path_to_data,
                    "speaker_metadata": speaker_df,
                    "speaker_ids": test_speaker_ids,
                    "mics": mics,
                    "dl_manager": dl_manager,
                },
            ),
        ]

    @staticmethod
    def _load_transcripts(path_to_data, mic, speaker):
        """Read every transcript file for one (mic, speaker) pair.

        Script files alternate lines: an ID line whose first tab-separated
        field is the utterance key, then the transcript text. Returns a
        {utterance_key: transcript} dict; empty when no script file exists,
        so the caller simply yields no examples for this speaker.
        """
        pattern = os.path.join(path_to_data, "DATA", mic, "SCRIPT", mic[-1] + speaker + '*.TXT')
        transcripts = {}
        for script in glob.glob(pattern):
            line_num = 0
            key = None
            with open(script, encoding='utf-8-sig') as f:
                for line in f:
                    if line_num == 0:
                        key = line.split("\t")[0]
                        line_num += 1
                    elif line_num == 1:
                        transcripts[key] = line.strip()
                        line_num -= 1
        return transcripts

    def _generate_examples(
        self,
        path_to_data,
        speaker_metadata,
        speaker_ids,
        mics,
        dl_manager
    ):
        """Yield (id, example) dicts for the requested mics and speakers."""
        id_ = 0
        for mic in mics:
            for speaker in speaker_ids:
                transcripts = self._load_transcripts(path_to_data, mic, speaker)

                archive_path = os.path.join(path_to_data, "DATA", mic, "WAVE", "SPEAKER" + speaker + '.zip')
                # Missing archive: skip this speaker entirely (best effort,
                # matching the original's intent).
                if not os.path.exists(archive_path):
                    continue

                # Hoisted out of the per-file loop: one metadata lookup per
                # speaker instead of one per audio file.
                metadata_row = speaker_metadata.loc[speaker_metadata["SCD/PART1"] == speaker].iloc[0]

                for path, f in dl_manager.iter_archive(archive_path):
                    # Utterance key = the 9 characters before the ".WAV"
                    # extension of the member name.
                    # BUG FIX: the original indexed `d[...]` directly and
                    # crashed with KeyError for audio without a transcript,
                    # despite the comment saying missing transcripts should
                    # be tolerated; skip such files instead.
                    key = f.name[-13:-4]
                    if key not in transcripts:
                        continue
                    full_path = os.path.join(archive_path, path) if archive_path else path
                    result = {
                        "audio": {"path": full_path, "bytes": f.read()},
                        "transcript": transcripts[key],
                        "audio_name": path,
                        "mic": mic,
                        "gender": metadata_row["SEX"],
                        "race": metadata_row["ACC"],
                    }
                    yield id_, result
                    id_ += 1