Tasks: Automatic Speech Recognition
Formats: parquet
Languages: English
Size: 100K - 1M
polinaeterna committed
Commit: ee802ec
Parent(s): 24aa3f7

add streaming
ami.py
CHANGED
@@ -283,19 +283,6 @@ class AMIConfig(datasets.BuilderConfig):
 
 
 class AMI(datasets.GeneratorBasedBuilder):
-    """
-    GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality
-    labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised
-    and unsupervised training (this implementation contains only labelled data for now).
-    Around 40,000 hours of transcribed audio is first collected from audiobooks, podcasts
-    and YouTube, covering both read and spontaneous speaking styles, and a variety of topics, such as arts, science,
-    sports, etc. A new forced alignment and segmentation pipeline is proposed to create sentence segments suitable
-    for speech recognition training, and to filter out segments with low-quality transcription. For system training,
-    GigaSpeech provides five subsets of different sizes, 10h, 250h, 1000h, 2500h, and 10000h.
-    For our 10,000-hour XL training subset, we cap the word error rate at 4% during the filtering/validation stage,
-    and for all our other smaller training subsets, we cap it at 0%. The DEV and TEST evaluation sets, on the other hand,
-    are re-processed by professional human transcribers to ensure high transcription quality.
-    """
 
     VERSION = datasets.Version("1.0.0")
 
@@ -331,9 +318,9 @@ class AMI(datasets.GeneratorBasedBuilder):
         dev_audio_files = {m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split="dev", _id=m) for m in _VALIDATION_SAMPLE_IDS}
         eval_audio_files = {m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split="eval", _id=m) for m in _EVAL_SAMPLE_IDS}
 
-        train_audio_archives = dl_manager.
-        dev_audio_archives = dl_manager.
-        eval_audio_archives = dl_manager.
+        train_audio_archives = dl_manager.download(train_audio_files)
+        dev_audio_archives = dl_manager.download(dev_audio_files)
+        eval_audio_archives = dl_manager.download(eval_audio_files)
 
         train_annotation = dl_manager.download_and_extract(_ANNOTATIONS_ARCHIVE_URL.format(split="train"))
         dev_annotation = dl_manager.download_and_extract(_ANNOTATIONS_ARCHIVE_URL.format(split="dev"))
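The move from a single download-and-extract step to a bare download is what enables streaming: the TAR archives are no longer unpacked up front, and dl_manager.iter_archive (used in the gen_kwargs below) walks their members lazily instead. As a rough mental model only, not the actual datasets implementation, iter_archive behaves like this tarfile-based generator:

import tarfile

def iter_archive_sketch(archive_path):
    """Hypothetical stand-in for dl_manager.iter_archive: yields
    (path_inside_archive, file_object) pairs without extracting the TAR."""
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)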
@@ -342,20 +329,37 @@ class AMI(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={
+                gen_kwargs={
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in train_audio_archives.values()],
+                    "local_extracted_archives_paths": dl_manager.extract(train_audio_archives).values() if not dl_manager.is_streaming else [None] * len(train_audio_archives),
+                    "annotation": train_annotation,
+                    "split": "train"
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={
+                gen_kwargs={
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in dev_audio_archives.values()],
+                    "local_extracted_archives_paths": dl_manager.extract(dev_audio_archives).values() if not dl_manager.is_streaming else [None] * len(dev_audio_archives),
+                    "annotation": dev_annotation,
+                    "split": "dev"
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={
+                gen_kwargs={
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in eval_audio_archives.values()],
+                    "local_extracted_archives_paths": dl_manager.extract(eval_audio_archives).values() if not dl_manager.is_streaming else [None] * len(eval_audio_archives),
+                    "annotation": eval_annotation,
+                    "split": "eval"
+                },
             ),
         ]
 
-    def _generate_examples(self,
+    def _generate_examples(self, audio_archives, local_extracted_archives_paths, annotation, split):
         # open annotation file
+        assert len(audio_archives) == len(local_extracted_archives_paths)
+
         with open(annotation, "r", encoding="utf-8") as f:
             transcriptions = {}
             for line in f.readlines():
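For context (not part of the diff): GeneratorBasedBuilder invokes _generate_examples(**gen_kwargs) once per split, so the keys above must match the new method signature exactly. The local_extracted_archives_paths entry keeps both modes on one code path: loaded normally, each entry is the directory an archive was extracted to; in streaming mode nothing is extracted and the list degenerates to Nones of the same length. Illustrated with made-up values:

# Hypothetical shapes of gen_kwargs; only the structure mirrors the script.
non_streaming_kwargs = {
    "audio_archives": ["<iterator over EN2001a.tar>", "<iterator over EN2001b.tar>"],
    "local_extracted_archives_paths": ["/cache/extracted/ab12", "/cache/extracted/cd34"],
    "annotation": "/cache/downloads/train_annotation.txt",
    "split": "train",
}
streaming_kwargs = {
    **non_streaming_kwargs,
    "local_extracted_archives_paths": [None, None],  # nothing is extracted
}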
@@ -363,8 +367,9 @@ class AMI(datasets.GeneratorBasedBuilder):
                 _id = line_items[0]
                 text = " ".join(line_items[1:])
                 _, segment_id, microphone_id, speaker_id, begin_time, end_time = _id.split("_")
+                audio_filename = "_".join([split, _id.lower()]) + ".wav"
 
-                transcriptions[
+                transcriptions[audio_filename] = {
                     "audio_id": _id,
                     "segment_id": segment_id,
                     "text": text,
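The new audio_filename key is what later joins transcripts to archive members. Running the parsing logic above on an invented annotation line (the ID layout follows the _id.split("_") unpacking in the diff; the concrete values are made up):

# Invented example; real lines come from the AMI annotation archives.
line = "AMI_ES2011a_H00_FEE041_0003714_0003915 HELLO EVERYONE"
line_items = line.split()
_id = line_items[0]
text = " ".join(line_items[1:])
_, segment_id, microphone_id, speaker_id, begin_time, end_time = _id.split("_")
audio_filename = "_".join(["train", _id.lower()]) + ".wav"
assert audio_filename == "train_ami_es2011a_h00_fee041_0003714_0003915.wav"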
@@ -374,10 +379,21 @@ class AMI(datasets.GeneratorBasedBuilder):
                     "speaker_id": speaker_id,
                 }
 
-        for
-
-
-            audio_file = os.path.join(audio[folder_id], folder_id, file_name)
-            result["audio"] = audio_file
-            yield _audio_id, result
+        for archive, local_archive_path in zip(audio_archives, local_extracted_archives_paths):
+            for audio_filename, audio_file in archive:
+                audio_meta = transcriptions[audio_filename.split("/")[-1]]
+
+                yield audio_filename, {
+                    "segment_id": audio_meta["segment_id"],
+                    "audio_id": audio_meta["audio_id"],
+                    "audio": {
+                        "path": os.path.join(local_archive_path, audio_filename) if local_archive_path else audio_filename,
+                        "bytes": audio_file.read(),
+                    },
+                    "text": audio_meta["text"],
+                    "begin_time": audio_meta["begin_time"],
+                    "end_time": audio_meta["end_time"],
+                    "microphone_id": audio_meta["microphone_id"],
+                    "speaker_id": audio_meta["speaker_id"],
+                }
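With this in place the script can be exercised end to end without materializing the corpus on disk. A usage sketch, assuming the script is saved locally as ami.py and that a config name such as "headset-single" exists (check the dataset card for the real config names):

from datasets import load_dataset

# Streaming: archives are downloaded and iterated in place, so each example's
# "audio" entry carries the raw bytes plus the file's path inside the archive.
ds = load_dataset("./ami.py", "headset-single", split="train", streaming=True)
sample = next(iter(ds))
print(sample["audio_id"], sample["speaker_id"], sample["text"])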
|