paulpeyret-biophonia committed on
Commit
3ba373e
·
1 Parent(s): 1642f48

add dataset building script

Browse files
Files changed (3) hide show
  1. NBMSet24.py +263 -0
  2. classes.py +21 -0
  3. descriptions.py +2 -0
NBMSet24.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """NBMSet24: Nocturnal Bird Migration Dataset"""
2
+
3
+ import os
4
+ import datasets
5
+ import pandas as pd
6
+ from tqdm.auto import tqdm
7
+ import tarfile
8
+
9
+ from classes import BIRD_NAMES_NBM
10
+
11
+ from descriptions import _NBM_CITATION, _NBM_DESCRIPTION
12
+
13
+
14
+ #############################################
15
+ _NBMSET24_CITATION = """\
16
+ @article{birdset,
17
+ title = {NBMSet24},
18
+ author={anonymous},
19
+ year={2025}
20
+ }
21
+ """
22
+ _NBMSET24_DESCRIPTION = """\
23
+ Dataset from https://arxiv.org/abs/2412.03633
24
+ """
25
+
26
+ base_url = "https://huggingface.co/datasets/DBD-research-group/NBMSet24"
27
+
28
+
29
+ def _extract_all_to_same_folder(tar_path, output_dir):
30
+ """custom extraction for tar.gz files, that extracts all files to output_dir without subfolders"""
31
+ # check if data already exists
32
+ if not os.path.isfile(output_dir) and os.path.isdir(output_dir) and os.listdir(output_dir):
33
+ return output_dir
34
+ os.makedirs(output_dir, exist_ok=True)
35
+
36
+ with tarfile.open(tar_path, "r:gz") as tar:
37
+ for member in tar.getmembers():
38
+ if member.isfile():
39
+ member.name = os.path.basename(member.name)
40
+ tar.extract(member, path=output_dir)
41
+
42
+ return output_dir
43
+
44
+
45
def _extract_and_delete(dl_dir: dict) -> dict:
    """Extract all downloaded archives and delete each archive immediately
    after extraction, with a progress bar.

    Only one archive and its extracted content occupy disk space at any
    time, keeping peak storage usage low.

    Parameters
    ----------
    dl_dir : dict
        Download-manager result mapping split names to lists of archive
        paths; non-list values (single metadata files) are left untouched.

    Returns
    -------
    dict mapping each split name to the list of extraction directories.
    """
    audio_paths = {name: [] for name, data in dl_dir.items() if isinstance(data, list)}
    for name, data in dl_dir.items():
        if not isinstance(data, list):
            # Metadata entries are single file paths, not archive lists.
            continue

        # Extract and immediately delete each archive of this split.
        for path in tqdm(data, f"Extracting {name} split"):
            head, tail = os.path.split(path)
            output_dir = os.path.join(head, "extracted", tail)
            # dl_manager.extract(path) would also work if no archive
            # contained subfolders; the custom helper flattens them.
            audio_path = _extract_all_to_same_folder(path, output_dir)
            os.remove(path)
            # datasets >3.0.0 handles its cache differently: remove stray
            # lock/metadata files left next to the downloaded archive.
            for leftover in (f"{path}.lock", f"{path}.json"):
                if os.path.exists(leftover):
                    os.remove(leftover)
            audio_paths[name].append(audio_path)

    return audio_paths
66
+
67
+
68
class NBMSetConfig(datasets.BuilderConfig):
    """BuilderConfig for NBMSet24 dataset variants.

    In addition to the standard ``BuilderConfig`` fields it stores the
    feature schema (whose class labels are built from *class_list*) and the
    citation string for the variant.
    """

    def __init__(
            self,
            name,
            citation,
            class_list,
            # genus_list,
            # species_group_list,
            # order_list,
            **kwargs):
        super().__init__(version=datasets.Version("0.0.4"), name=name, **kwargs)

        # Feature schema shared by every split of this configuration.
        # Commented entries are candidate fields not yet present in the
        # uploaded metadata.
        schema = {
            "audio": datasets.Audio(sampling_rate=32_000, mono=True, decode=False),
            "filepath": datasets.Value("string"),
            "start_time": datasets.Value("float64"),
            "end_time": datasets.Value("float64"),
            "low_freq": datasets.Value("int64"),
            "high_freq": datasets.Value("int64"),
            "ebird_code": datasets.ClassLabel(names=class_list),
            "ebird_code_multilabel": datasets.Sequence(datasets.ClassLabel(names=class_list)),
            "ebird_code_secondary": datasets.Sequence(datasets.Value("string")),
            "label": datasets.Value("string"),
            "original_label": datasets.Value("string"),
            # "french_label": datasets.Value("string"),
            # "call_type": datasets.Value("string"),
            # "sex": datasets.Value("string"),
            # "lat": datasets.Value("float64"),
            # "long": datasets.Value("float64"),
            # "length": datasets.Value("int64"),
            # "microphone": datasets.Value("string"),
            # "license": datasets.Value("string"),
            # "source": datasets.Value("string"),
            # "local_time": datasets.Value("string"),
            "detected_events": datasets.Sequence(datasets.Sequence(datasets.Value("float64"))),
            # "event_cluster": datasets.Sequence(datasets.Value("int64")),
            # "peaks": datasets.Sequence(datasets.Value("float64")),
            # "quality": datasets.Value("string"),
            # "recordist": datasets.Value("string"),
            # "genus": datasets.ClassLabel(names=genus_list),
            # "species_group": datasets.ClassLabel(names=species_group_list),
            # "order": datasets.ClassLabel(names=order_list),
            # "genus_multilabel": datasets.Sequence(datasets.ClassLabel(names=genus_list)),
            # "species_group_multilabel": datasets.Sequence(datasets.ClassLabel(names=species_group_list)),
            # "order_multilabel": datasets.Sequence(datasets.ClassLabel(names=order_list)),
        }

        self.citation = citation
        self.features = datasets.Features(schema)
117
+
118
+
119
class BirdSet(datasets.GeneratorBasedBuilder):
    """Dataset builder for NBMSet24 (nocturnal bird migration audio).

    Downloads sharded ``.tar.gz`` audio archives plus parquet metadata from
    the Hugging Face hub and yields one example per annotated audio event.
    """

    # Small writer batches keep peak RAM low while arrow files are written.
    DEFAULT_WRITER_BATCH_SIZE = 500

    BUILDER_CONFIGS = [
        NBMSetConfig(
            name="NBM",
            description=_NBMSET24_DESCRIPTION,
            citation=_NBMSET24_CITATION,
            data_dir=f"{base_url}",
            class_list=BIRD_NAMES_NBM,
            # genus_list=classes.GENUS_NBM,
            # species_group_list=classes.SPECIES_GROUP_NBM,
            # order_list=classes.ORDER_NBM,
        ),
        # NBMSetConfig(
        #     name="NBM_xc",
        #     description=_NBMSET24_DESCRIPTION,
        #     citation=_NBMSET24_CITATION,
        #     data_dir=f"{base_url}",
        #     class_list=BIRD_NAMES_NBM,
        #     # genus_list=classes.GENUS_NBM,
        #     # species_group_list=classes.SPECIES_GROUP_NBM,
        #     # order_list=classes.ORDER_NBM,
        # ),
        # NBMSetConfig(
        #     name="NBM_scape",
        #     description=_NBMSET24_DESCRIPTION,
        #     citation=_NBMSET24_CITATION,
        #     data_dir=f"{base_url}",
        #     class_list=BIRD_NAMES_NBM,
        #     # genus_list=classes.GENUS_NBM,
        #     # species_group_list=classes.SPECIES_GROUP_NBM,
        #     # order_list=classes.ORDER_NBM,
        # ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # Prepend the corpus-wide NBM description to the config-specific
            # one.  (The original prepended _NBMSET24_DESCRIPTION, which is
            # exactly what config.description already contains, duplicating
            # the text; the imported _NBM_DESCRIPTION went unused.)
            description=_NBM_DESCRIPTION + self.config.description,
            features=self.config.features,
            # Append the corpus-wide NBM citation.  (The original appended
            # the dataset *description* to the citation by mistake.)
            citation=self.config.citation + "\n" + _NBM_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download (and, when not streaming, extract) the shards and build
        one SplitGenerator per audio split present in the download dict."""
        ds_name = self.config.name
        # Number of uploaded tar.gz shard archives per split, per config.
        train_files = {"NBM": 12
                       }

        test_files = {"NBM": 1,
                      }

        # test_5s_files = {"NBM",}

        dl_dir = dl_manager.download({
            "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
            "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
            # "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1, test_5s_files[ds_name] + 1)],
            "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
            "meta_test": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
            # "meta_test_5s": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test_5s.parquet"),
        })

        # Custom extraction that deletes each archive right after extracting
        # it, so only one archive plus its content occupy disk at a time.
        audio_paths = _extract_and_delete(dl_dir) if not dl_manager.is_streaming else None

        # Construct split generators.  Every audio split "<name>" in dl_dir
        # is assumed to have a matching "meta_<name>" parquet entry.
        names = [name for name in dl_dir.keys() if not name.startswith("meta_")]
        is_streaming = dl_manager.is_streaming

        return [datasets.SplitGenerator(
            name=name,
            gen_kwargs={
                "audio_archive_iterators": (dl_manager.iter_archive(archive_path) for archive_path in dl_dir[name]) if is_streaming else (),
                "audio_extracted_paths": audio_paths[name] if not is_streaming else (),
                "meta_path": dl_dir[f"meta_{name}"],
                "split": name
            }
        ) for name in names]

    def _generate_examples(self, audio_archive_iterators, audio_extracted_paths, meta_path, split):
        """Yield ``(index, example)`` pairs for one split.

        Exactly one of *audio_archive_iterators* (streaming mode) or
        *audio_extracted_paths* (local-extraction mode) is non-empty.
        """
        metadata = pd.read_parquet(meta_path)
        # Re-index by bare file name so rows can be matched against archive
        # members / extracted files, which carry no directory prefix.
        if metadata.index.name != "filepath":
            metadata.index = metadata["filepath"].str.split("/").apply(lambda x: x[-1])

        idx = 0
        # Streaming: read audio bytes straight out of the tar archives.
        for audio_archive_iterator in audio_archive_iterators:
            for audio_path_in_archive, audio_file in audio_archive_iterator:
                file_name = os.path.split(audio_path_in_archive)[-1]
                # .loc[[name]] keeps a DataFrame even for a single hit and
                # yields every annotation row of this recording.
                rows = metadata.loc[[file_name]]
                audio = audio_file.read()
                for _, row in rows.iterrows():
                    yield idx, self._metadata_from_row(row, split, audio_path=file_name, audio=audio)
                    idx += 1

        # Not streaming: walk the directories produced by _extract_and_delete.
        for audio_extracted_path in audio_extracted_paths:
            audio_files = os.listdir(audio_extracted_path)
            current_metadata = metadata.loc[audio_files]
            for audio_file, row in current_metadata.iterrows():
                audio_path = os.path.join(audio_extracted_path, audio_file)
                yield idx, self._metadata_from_row(row, split, audio_path=audio_path)
                idx += 1

    @staticmethod
    def _metadata_from_row(row, split: str, audio_path=None, audio=None) -> dict:
        """Build one example dict from a metadata row.

        When *audio* bytes are given (streaming mode) they are embedded
        directly; otherwise *audio_path* points at an extracted local file.
        """
        return {
            # `is not None` instead of truthiness so that empty audio bytes
            # are still treated as in-memory audio (the original fell back
            # to a non-existent local path for b"").
            "audio": {"path": None, "bytes": audio} if audio is not None else audio_path,
            "filepath": audio_path,
            "start_time": row["start_time"],
            "end_time": row["end_time"],
            "low_freq": row["low_freq"],
            "high_freq": row["high_freq"],
            # NOTE(review): filled from the *multilabel* column — looks like
            # a copy-paste slip ("ebird_code" expected, and the feature is a
            # single ClassLabel); confirm against the parquet schema before
            # changing.
            "ebird_code": row.get("ebird_code_multilabel", None),
            "ebird_code_multilabel": row.get("ebird_code_multilabel", None),
            "ebird_code_secondary": row.get("ebird_code_secondary", None),
            "original_label": row["original_label"],
            "label": row["label"],
            # "french_label": row["french_label"],
            # "call_type": row["call_type"],
            # "sex": row["sex"],
            # "lat": row["lat"],
            # "long": row["long"],
            # "length": row.get("length", None),
            # "microphone": row["microphone"],
            # "license": row.get("license", None),
            # "source": row["source"],
            # "local_time": row["local_time"],
            "detected_events": row.get("detected_events", None),
            # "event_cluster": row.get("event_cluster", None),
            # "peaks": row.get("peaks", None),
            # "quality": row.get("quality", None),
            # "recordist": row.get("recordist", None),
            # "genus": row.get("genus", None) if split != "test_5s" else None,
            # "species_group": row.get("species_group", None) if split != "test_5s" else None,
            # "order": row.get("order", None) if split != "test_5s" else None,
            # "genus_multilabel": row.get("genus_multilabel", [row.get("genus")]),
            # "species_group_multilabel": row.get("species_group_multilabel", [row.get("species_group")]),
            # "order_multilabel": row.get("order_multilabel", [row.get("order")]),
        }
classes.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# eBird-style species codes used as the ClassLabel vocabulary for NBMSet24.
# NOTE(review): the list contains an empty-string entry (after 'comcha') —
# presumably a placeholder/unknown class; confirm it is intentional, since
# removing it would shift every subsequent ClassLabel index.
BIRD_NAMES_NBM=['comsan', 'eurcoo', 'skylar', 'comcha', '', 'eursis', 'carcro1',
                'comgre', 'whiwag', 'gretit1', 'norlap', 'gnwtea', 'meapip1',
                'trepip', 'dunlin', 'sonthr1', 'graher1', 'eurcur', 'litowl1',
                'comnig1', 'eursco1', 'eutkne1', 'houspa', 'lirplo', 'corplo',
                'grscuc1', 'comqua1', 'eurgre1', 'blutit', 'eurrob1', 'eurbla',
                'comchi1', 'shttre1', 'grcgre1', 'mallar3', 'yellow2', 'ortbun1',
                'eupfly1', 'comsni', 'dunnoc1', 'winwre4', 'redwin', 'commoo3',
                'comcra', 'euroys1', 'litbit1', 'blksco1', 'bkhgul', 'eaywag1',
                'eurlin1', 'reebun', 'combuz1', 'spofly1', 'whimbr', 'bcnher',
                'tawowl1', 'blackc1', 'fieldf', 'misthr1', 'eurmag1', 'eugplo',
                'bkbplo', 'watrai1', 'litgre1', 'grnsan', 'comred1', 'brambl',
                'rinouz1', 'brnowl', 'brant', 'gragoo', 'woosan', 'hawfin',
                'firecr1', 'grebit1', 'goldcr1', 'bird1', 'blared1', 'purher1',
                'cangoo', 'eucdov', 'rinphe1', 'eurjac', 'comswi', 'redcro',
                'loeowl', 'norpin', 'eursta', 'pieavo1', 'tawpip1', 'woolar1',
                'eurnig1', 'spocra1', 'spored', 'eargre', 'comcuc', 'cetwar1',
                'litbus1', 'rook1', 'cowpig1', 'bkwsti', 'eurgol', 'medgul1',
                'gadwal', 'eurwig', 'eueowl1', 'eurwoo', 'eurdot', 'lbbgul',
                'yelgul1', 'larus1', 'olbpip', 'watpip1', 'eurjay1', 'eurser1',
                'eurbul', 'eurnut2', 'litgul', 'hergul', 'grewhi1', 'whwsco3',
                'eugori2', 'cretit2', 'wlwwar', 'comrav', 'grswoo', 'grywag']
descriptions.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
# Corpus-wide description/citation placeholders for the NBM corpus;
# combined with the NBMSet24-specific strings in NBMSet24.py.
_NBM_DESCRIPTION="This is NBM DATASET"
_NBM_CITATION="NBM Citation"