Create timit.py
Browse files
timit.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
|
| 3 |
+
"""TIMIT automatic speech recognition and speaker identification dataset."""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import textwrap
|
| 8 |
+
import datasets
|
| 9 |
+
import itertools
|
| 10 |
+
import typing as tp
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
SAMPLE_RATE = 16_000
|
| 15 |
+
|
| 16 |
+
_ZIP_FILENAME = 'timit/zip'
|
| 17 |
+
|
| 18 |
+
_CITATION = """\
|
| 19 |
+
@inproceedings{
|
| 20 |
+
title={TIMIT Acoustic-Phonetic Continuous Speech Corpus},
|
| 21 |
+
author={Garofolo, John S., et al},
|
| 22 |
+
ldc_catalog_no={LDC93S1},
|
| 23 |
+
DOI={https://doi.org/10.35111/17gk-bn40},
|
| 24 |
+
journal={Linguistic Data Consortium, Philadelphia},
|
| 25 |
+
year={1983}
|
| 26 |
+
}
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
_DESCRIPTION = """\
|
| 30 |
+
The TIMIT corpus of reading speech has been developed to provide speech data for acoustic-phonetic research studies
|
| 31 |
+
and for the evaluation of automatic speech recognition systems.
|
| 32 |
+
TIMIT contains high quality recordings of 630 individuals/speakers with 8 different American English dialects,
|
| 33 |
+
with each individual reading upto 10 phonetically rich sentences.
|
| 34 |
+
More info on TIMIT dataset can be understood from the "README" which can be found here:
|
| 35 |
+
https://catalog.ldc.upenn.edu/docs/LDC93S1/readme.txt
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
_HOMEPAGE = "https://catalog.ldc.upenn.edu/LDC93S1"
|
| 39 |
+
_SPEAKERS = ['FADG0', 'FAEM0', 'FAJW0', 'FAKS0', 'FALK0', 'FALR0', 'FAPB0', 'FASW0', 'FAWF0', 'FBAS0', 'FBCG1', 'FBCH0', 'FBJL0', 'FBLV0', 'FBMH0', 'FBMJ0', 'FCAG0', 'FCAJ0', 'FCAL1', 'FCAU0', 'FCDR1', 'FCEG0', 'FCFT0', 'FCJF0', 'FCJS0', 'FCKE0', 'FCLT0', 'FCMG0', 'FCMH0', 'FCMH1', 'FCMM0', 'FCMR0', 'FCRH0', 'FCRZ0', 'FCYL0', 'FDAC1', 'FDAS1', 'FDAW0', 'FDFB0', 'FDHC0', 'FDJH0', 'FDKN0', 'FDML0', 'FDMS0', 'FDMY0', 'FDNC0', 'FDRD1', 'FDRW0', 'FDTD0', 'FDXW0', 'FEAC0', 'FEAR0', 'FECD0', 'FEDW0', 'FEEH0', 'FELC0', 'FEME0', 'FETB0', 'FEXM0', 'FGCS0', 'FGDP0', 'FGJD0', 'FGMB0', 'FGMD0', 'FGRW0', 'FGWR0', 'FHES0', 'FHEW0', 'FHLM0', 'FHXS0', 'FISB0', 'FJAS0', 'FJCS0', 'FJDM2', 'FJEM0', 'FJEN0', 'FJHK0', 'FJKL0', 'FJLG0', 'FJLM0', 'FJLR0', 'FJMG0', 'FJRB0', 'FJRE0', 'FJRP1', 'FJSA0', 'FJSJ0', 'FJSK0', 'FJSP0', 'FJWB0', 'FJWB1', 'FJXM0', 'FJXP0', 'FKAA0', 'FKDE0', 'FKDW0', 'FKFB0', 'FKKH0', 'FKLC0', 'FKLC1', 'FKLH0', 'FKMS0', 'FKSR0', 'FLAC0', 'FLAG0', 'FLAS0', 'FLBW0', 'FLEH0', 'FLET0', 'FLHD0', 'FLJA0', 'FLJD0', 'FLJG0', 'FLKD0', 'FLKM0', 'FLMA0', 'FLMC0', 'FLMK0', 'FLNH0', 'FLOD0', 'FLTM0', 'FMAF0', 'FMAH0', 'FMAH1', 'FMBG0', 'FMCM0', 'FMEM0', 'FMGD0', 'FMJB0', 'FMJF0', 'FMJU0', 'FMKC0', 'FMKF0', 'FMLD0', 'FMMH0', 'FMML0', 'FMPG0', 'FNKL0', 'FNLP0', 'FNMR0', 'FNTB0', 'FPAB1', 'FPAC0', 'FPAD0', 'FPAF0', 'FPAS0', 'FPAZ0', 'FPJF0', 'FPKT0', 'FPLS0', 'FPMY0', 'FRAM1', 'FREH0', 'FREW0', 'FRJB0', 'FRLL0', 'FRNG0', 'FSAG0', 'FSAH0', 'FSAK0', 'FSBK0', 'FSCN0', 'FSDC0', 'FSDJ0', 'FSEM0', 'FSGF0', 'FSJG0', 'FSJK1', 'FSJS0', 'FSJW0', 'FSKC0', 'FSKL0', 'FSKP0', 'FSLB1', 'FSLS0', 'FSMA0', 'FSMM0', 'FSMS1', 'FSPM0', 'FSRH0', 'FSSB0', 'FSXA0', 'FTAJ0', 'FTBR0', 'FTBW0', 'FTLG0', 'FTLH0', 'FTMG0', 'FUTB0', 'FVFB0', 'FVKB0', 'FVMH0', 'MABC0', 'MABW0', 'MADC0', 'MADD0', 'MAEB0', 'MAEO0', 'MAFM0', 'MAHH0', 'MAJC0', 'MAJP0', 'MAKB0', 'MAKR0', 'MAPV0', 'MARC0', 'MARW0', 'MBAR0', 'MBBR0', 'MBCG0', 'MBDG0', 'MBEF0', 'MBGT0', 'MBJK0', 'MBJV0', 'MBMA0', 'MBMA1', 'MBML0', 'MBNS0', 'MBOM0', 
'MBPM0', 'MBSB0', 'MBTH0', 'MBWM0', 'MBWP0', 'MCAE0', 'MCAL0', 'MCCS0', 'MCDC0', 'MCDD0', 'MCDR0', 'MCEF0', 'MCEM0', 'MCEW0', 'MCHH0', 'MCHL0', 'MCLK0', 'MCLM0', 'MCMB0', 'MCMJ0', 'MCPM0', 'MCRC0', 'MCRE0', 'MCSH0', 'MCSS0', 'MCTH0', 'MCTM0', 'MCTT0', 'MCTW0', 'MCXM0', 'MDAB0', 'MDAC0', 'MDAC2', 'MDAS0', 'MDAW1', 'MDBB0', 'MDBB1', 'MDBP0', 'MDCD0', 'MDCM0', 'MDDC0', 'MDED0', 'MDEF0', 'MDEM0', 'MDHL0', 'MDHS0', 'MDJM0', 'MDKS0', 'MDLB0', 'MDLC0', 'MDLC1', 'MDLC2', 'MDLD0', 'MDLF0', 'MDLH0', 'MDLM0', 'MDLR0', 'MDLR1', 'MDLS0', 'MDMA0', 'MDMT0', 'MDNS0', 'MDPB0', 'MDPK0', 'MDPS0', 'MDRB0', 'MDRD0', 'MDRM0', 'MDSC0', 'MDSJ0', 'MDSS0', 'MDSS1', 'MDTB0', 'MDVC0', 'MDWA0', 'MDWD0', 'MDWH0', 'MDWK0', 'MDWM0', 'MEAL0', 'MEDR0', 'MEFG0', 'MEGJ0', 'MEJL0', 'MEJS0', 'MERS0', 'MESD0', 'MESG0', 'MESJ0', 'MEWM0', 'MFER0', 'MFGK0', 'MFMC0', 'MFRM0', 'MFWK0', 'MFXS0', 'MFXV0', 'MGAF0', 'MGAG0', 'MGAK0', 'MGAR0', 'MGAW0', 'MGES0', 'MGJC0', 'MGJF0', 'MGLB0', 'MGMM0', 'MGRL0', 'MGRP0', 'MGRT0', 'MGSH0', 'MGSL0', 'MGWT0', 'MGXP0', 'MHBS0', 'MHIT0', 'MHJB0', 'MHMG0', 'MHMR0', 'MHPG0', 'MHRM0', 'MHXL0', 'MILB0', 'MJAC0', 'MJAE0', 'MJAI0', 'MJAR0', 'MJBG0', 'MJBR0', 'MJDA0', 'MJDC0', 'MJDE0', 'MJDG0', 'MJDH0', 'MJDM0', 'MJDM1', 'MJEB0', 'MJEB1', 'MJEE0', 'MJES0', 'MJFC0', 'MJFH0', 'MJFR0', 'MJHI0', 'MJJB0', 'MJJG0', 'MJJJ0', 'MJJM0', 'MJKR0', 'MJLB0', 'MJLG1', 'MJLN0', 'MJLS0', 'MJMA0', 'MJMD0', 'MJMM0', 'MJMP0', 'MJPG0', 'MJPM0', 'MJPM1', 'MJRA0', 'MJRF0', 'MJRG0', 'MJRH0', 'MJRH1', 'MJRK0', 'MJRP0', 'MJSR0', 'MJSW0', 'MJTC0', 'MJTH0', 'MJVW0', 'MJWG0', 'MJWS0', 'MJWT0', 'MJXA0', 'MJXL0', 'MKAG0', 'MKAH0', 'MKAJ0', 'MKAM0', 'MKCH0', 'MKCL0', 'MKDB0', 'MKDD0', 'MKDR0', 'MKDT0', 'MKES0', 'MKJL0', 'MKJO0', 'MKLN0', 'MKLR0', 'MKLS0', 'MKLS1', 'MKLT0', 'MKLW0', 'MKRG0', 'MKXL0', 'MLBC0', 'MLEL0', 'MLIH0', 'MLJB0', 'MLJC0', 'MLJH0', 'MLLL0', 'MLNS0', 'MLNT0', 'MLSH0', 'MMAA0', 'MMAB0', 'MMAB1', 'MMAG0', 'MMAM0', 'MMAR0', 'MMBS0', 'MMCC0', 'MMDB0', 'MMDB1', 'MMDG0', 'MMDH0', 'MMDM0', 'MMDM1', 
'MMDM2', 'MMDS0', 'MMEA0', 'MMEB0', 'MMGC0', 'MMGG0', 'MMGK0', 'MMJB1', 'MMJR0', 'MMLM0', 'MMPM0', 'MMRP0', 'MMSM0', 'MMVP0', 'MMWB0', 'MMWH0', 'MMWS0', 'MMWS1', 'MMXS0', 'MNET0', 'MNJM0', 'MNLS0', 'MNTW0', 'MPAB0', 'MPAM0', 'MPAM1', 'MPAR0', 'MPCS0', 'MPDF0', 'MPEB0', 'MPFU0', 'MPGH0', 'MPGL0', 'MPGR0', 'MPGR1', 'MPLB0', 'MPMB0', 'MPPC0', 'MPRB0', 'MPRD0', 'MPRK0', 'MPRT0', 'MPSW0', 'MPWM0', 'MRAB0', 'MRAB1', 'MRAI0', 'MRAM0', 'MRAV0', 'MRBC0', 'MRCG0', 'MRCS0', 'MRCW0', 'MRCZ0', 'MRDD0', 'MRDM0', 'MRDS0', 'MREB0', 'MREE0', 'MREH1', 'MREM0', 'MRES0', 'MREW1', 'MRFK0', 'MRFL0', 'MRGG0', 'MRGM0', 'MRGS0', 'MRHL0', 'MRJB1', 'MRJH0', 'MRJM0', 'MRJM1', 'MRJM3', 'MRJM4', 'MRJO0', 'MRJR0', 'MRJS0', 'MRJT0', 'MRKM0', 'MRKO0', 'MRLD0', 'MRLJ0', 'MRLJ1', 'MRLK0', 'MRLR0', 'MRMB0', 'MRMG0', 'MRMH0', 'MRML0', 'MRMS0', 'MRMS1', 'MROA0', 'MRPC0', 'MRPC1', 'MRPP0', 'MRRE0', 'MRRK0', 'MRSO0', 'MRSP0', 'MRTC0', 'MRTJ0', 'MRTK0', 'MRVG0', 'MRWA0', 'MRWS0', 'MRWS1', 'MRXB0', 'MSAH1', 'MSAS0', 'MSAT0', 'MSAT1', 'MSDB0', 'MSDH0', 'MSDS0', 'MSEM1', 'MSES0', 'MSFH0', 'MSFH1', 'MSFV0', 'MSJK0', 'MSJS1', 'MSLB0', 'MSMC0', 'MSMR0', 'MSMS0', 'MSRG0', 'MSRR0', 'MSTF0', 'MSTK0', 'MSVS0', 'MTAA0', 'MTAB0', 'MTAS0', 'MTAS1', 'MTAT0', 'MTAT1', 'MTBC0', 'MTCS0', 'MTDB0', 'MTDP0', 'MTDT0', 'MTEB0', 'MTER0', 'MTHC0', 'MTJG0', 'MTJM0', 'MTJS0', 'MTJU0', 'MTKD0', 'MTKP0', 'MTLB0', 'MTLC0', 'MTLS0', 'MTML0', 'MTMN0', 'MTMR0', 'MTMT0', 'MTPF0', 'MTPG0', 'MTPP0', 'MTPR0', 'MTQC0', 'MTRC0', 'MTRR0', 'MTRT0', 'MTWH0', 'MTWH1', 'MTXS0', 'MVJH0', 'MVLO0', 'MVRW0', 'MWAC0', 'MWAD0', 'MWAR0', 'MWBT0', 'MWCH0', 'MWDK0', 'MWEM0', 'MWEW0', 'MWGR0', 'MWJG0', 'MWRE0', 'MWRP0', 'MWSB0', 'MWSH0', 'MWVW0', 'MZMB0']
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class TimitConfig(datasets.BuilderConfig):
    """BuilderConfig for TIMIT.

    Args:
        features: `datasets.Features` describing the columns this
            configuration yields.
        **kwargs: forwarded to `datasets.BuilderConfig` (e.g. `name`,
            `description`).
    """

    def __init__(self, features, **kwargs):
        # Pin the script version; Python-3 zero-argument super() call.
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
        self.features = features
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class TIMIT(datasets.GeneratorBasedBuilder):
    """TIMIT dataset builder.

    Two configurations:
      * ``si``  — speaker identification (utterance -> speaker label).
      * ``asr`` — automatic speech recognition (utterance -> phones/words/text).
    """

    BUILDER_CONFIGS = [
        TimitConfig(
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=SAMPLE_RATE),
                    "speaker_id": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=_SPEAKERS),
                }
            ),
            name="si",
            description=textwrap.dedent(
                """\
                Speaker Identification (SI) classifies each utterance for its speaker identity as a multi-class
                classification, where speakers are in the same pre-defined set for both training and testing.
                The evaluation metric is accuracy (ACC).
                """
            ),
        ),
        TimitConfig(
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=SAMPLE_RATE),
                    "phonetic_detail": datasets.Sequence(
                        {
                            "start": datasets.Value("int64"),
                            "stop": datasets.Value("int64"),
                            "utterance": datasets.Value("string"),
                        }
                    ),
                    "word_detail": datasets.Sequence(
                        {
                            "start": datasets.Value("int64"),
                            "stop": datasets.Value("int64"),
                            "utterance": datasets.Value("string"),
                        }
                    ),
                    "text": datasets.Value("string"),
                }
            ),
            name="asr",
            description=textwrap.dedent(
                """\
                ASR transcribes utterances into words. While PR analyses the
                improvement in modeling phonetics, ASR reflects the significance of
                the improvement in a real-world scenario.
                The evaluation metric is word error rate (WER).
                """
            ),
        ),
    ]

    def _info(self):
        """Return the `DatasetInfo` for the active configuration."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        ``si`` gets train/validation/test (split per speaker in
        `_generate_examples`); ``asr`` uses the corpus' own train/test
        directory layout.
        """
        archive_path = dl_manager.extract(_ZIP_FILENAME)

        if self.config.name == "si":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train"}
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION, gen_kwargs={"archive_path": archive_path, "split": "validation"}
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
                ),
            ]
        elif self.config.name == "asr":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train"}
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
                ),
            ]
        # Was a silent `return None`, which datasets turns into an opaque error.
        raise ValueError(f"Unknown config name: {self.config.name!r}")

    def _generate_examples(self, archive_path, split=None):
        """Yields (key, example) pairs for the requested split."""
        if self.config.name == "si":
            _, wav_files = fast_scandir(archive_path, [".wav"], recursive=True)
            # Pair every file with its speaker (the parent directory name).
            # Sorting is required: itertools.groupby only groups *consecutive*
            # equal keys, and os.scandir order is not guaranteed.
            pairs = sorted((Path(fileid).parent.stem, fileid) for fileid in wav_files)
            train_files, val_files, test_files = [], [], []
            for _, group in itertools.groupby(pairs, lambda pair: pair[0]):
                speaker_files = [fileid for _, fileid in group]
                # Per-speaker split: up to 10 utterances -> 6 train / 2 val / rest test.
                train_files.extend(speaker_files[:6])
                val_files.extend(speaker_files[6:8])
                test_files.extend(speaker_files[8:])
            split_map = {"train": train_files, "validation": val_files, "test": test_files}
            if split not in split_map:
                # Previously an unknown split left `files` unbound -> NameError.
                raise ValueError(f"Unknown split for 'si' config: {split!r}")
            for guid, audio_path in enumerate(split_map[split]):
                speaker = Path(audio_path).parent.stem
                # NOTE: no "id" column is yielded — it is not declared in the
                # config features and would make the example keys mismatch.
                yield guid, {
                    "file": audio_path,
                    "audio": audio_path,
                    "speaker_id": speaker,
                    "label": speaker,
                }
        elif self.config.name == "asr":
            wav_paths = sorted(Path(archive_path).glob(f"**/{split}/**/*.wav"))
            if not wav_paths:
                # Some distributions ship upper-case directory names (TRAIN/TEST).
                wav_paths = sorted(Path(archive_path).glob(f"**/{split.upper()}/**/*.wav"))
            for guid, wav_path in enumerate(wav_paths):
                stem = Path(str(wav_path).replace(".wav", ""))
                # Phone-level annotations (.phn) and word-level annotations (.wrd)
                # share the same "start stop utterance" line format.
                phonetic_detail = self._read_detail_file(with_case_insensitive_suffix(stem, ".phn"))
                word_detail = self._read_detail_file(with_case_insensitive_suffix(stem, ".wrd"))
                text = " ".join(w["utterance"] for w in word_detail)
                yield guid, {
                    "file": str(wav_path),
                    "audio": str(wav_path),
                    "phonetic_detail": phonetic_detail,
                    "word_detail": word_detail,
                    "text": text,
                }

    @staticmethod
    def _read_detail_file(path):
        """Parse a TIMIT .phn/.wrd file into start/stop/utterance dicts.

        Each non-blank line is ``<start-sample> <stop-sample> <utterance>``.
        Blank lines (e.g. a trailing newline) are skipped; start/stop are cast
        to int to match the declared ``int64`` features.
        """
        details = []
        with path.open(encoding="utf-8") as fh:
            for line in fh:
                parts = line.split(" ")
                if not line.strip():
                    continue
                details.append(
                    {
                        "start": int(parts[0]),
                        "stop": int(parts[1]),
                        "utterance": " ".join(parts[2:]).strip(),
                    }
                )
        return details
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def fast_scandir(path: str, exts: tp.List[str], recursive: bool = False):
    """Scan *path* for sub-folders and files matching one of *exts*.

    Faster than glob on large trees because `os.scandir` reuses cached
    directory-entry information. Adapted from
    github.com/drscotthawley/aeiou/blob/main/aeiou/core.py

    Args:
        path: directory to scan.
        exts: extensions to keep, lower-case and including the dot
            (e.g. ``['.wav']``); matching is case-insensitive on the file side.
        recursive: when True, descend into sub-directories.

    Returns:
        ``(subfolders, files)`` — lists of directory paths and matching
        file paths. Unreadable directories/entries are skipped silently
        (best-effort scan).
    """
    subfolders: tp.List[str] = []
    files: tp.List[str] = []

    try:
        # Context manager closes the scandir iterator promptly.
        with os.scandir(path) as entries:
            for entry in entries:
                try:
                    if entry.is_dir():
                        subfolders.append(entry.path)
                    elif entry.is_file() and os.path.splitext(entry.name)[1].lower() in exts:
                        files.append(entry.path)
                except OSError:
                    # e.g. too many levels of symbolic links — skip this entry.
                    pass
    except OSError:
        # e.g. permission denied on the directory itself — return what we have.
        pass

    if recursive:
        # Iterate over a snapshot: the list grows while we extend it.
        for folder in list(subfolders):
            sub_dirs, sub_files = fast_scandir(folder, exts, recursive=True)
            subfolders.extend(sub_dirs)
            files.extend(sub_files)

    return subfolders, files
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def with_case_insensitive_suffix(path: Path, suffix: str):
    """Return *path* with *suffix* attached, preferring the lower-case form.

    TIMIT distributions ship annotation files with either lower-case
    (``.phn``) or upper-case (``.PHN``) extensions; if the lower-case
    candidate does not exist on disk, fall back to the upper-case one.
    """
    lower_candidate = path.with_suffix(suffix.lower())
    if lower_candidate.exists():
        return lower_candidate
    return lower_candidate.with_suffix(suffix.upper())
|