| |
|
|
| """SEP-28K dataset.""" |
|
|
| import os |
| import gzip |
| import shutil |
| import urllib.request |
| from typing import List |
| from pathlib import Path |
|
|
| import librosa |
| import datasets |
| import pandas as pd |
| from rich import print |
| from tqdm.auto import tqdm |
|
|
# Target sampling rate (Hz) the `datasets.Audio` feature decodes every clip to.
SAMPLING_RATE = 16_000


# Label vocabulary for the `label` ClassLabel feature; order defines the
# integer ids.  `no_dysfluencies` marks clips with no annotated stutter event.
CLASSES = ['block', 'prolongation', 'sound_rep', 'word_rep', 'interjection', 'no_dysfluencies']
|
|
|
|
class SEP28KConfig(datasets.BuilderConfig):
    """BuilderConfig for the SEP-28K loading script.

    Arguments
    ---------
    features : datasets.Features
        Feature schema served by this configuration.
    **kwargs
        Forwarded to ``datasets.BuilderConfig`` (``name``, ``description``, ...).
    """

    def __init__(self, features, **kwargs):
        # Pin the script version here so individual configs only have to
        # supply their name/description.
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
        self.features = features
|
|
|
|
class SEP28K(datasets.GeneratorBasedBuilder):
    """Builder for SEP-28K (Stuttering Events in Podcasts) and FluencyBank.

    Each example is an audio clip plus the list of stuttering event types
    that enough annotators agreed on (see ``_generate_examples``).
    """

    # Both configurations share the same schema; only the metadata CSV they
    # read differs, so the configs are generated from one template.
    BUILDER_CONFIGS = [
        SEP28KConfig(
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=SAMPLING_RATE),
                    "start": datasets.Value("int32"),
                    "end": datasets.Value("int32"),
                    "stutter": datasets.Sequence(datasets.Value("string")),
                    "label": datasets.Sequence(datasets.features.ClassLabel(names=CLASSES)),
                }
            ),
            name=config_name,
            description="",
        )
        for config_name in ("sep28k", "fluencybank")
    ]

    DEFAULT_CONFIG_NAME = "sep28k"

    def __init__(
        self,
        cache_dir = None,
        dataset_name = None,
        config_name = None,
        hash = None,
        base_path = None,
        info = None,
        features = None,
        token = None,
        repo_id = None,
        data_files = None,
        data_dir = None,
        storage_options = None,
        writer_batch_size = None,
        **config_kwargs
    ):
        # Pure pass-through to GeneratorBasedBuilder, kept only to make the
        # accepted keyword arguments explicit in this script.
        super().__init__(
            cache_dir,
            dataset_name,
            config_name,
            hash,
            base_path,
            info,
            features,
            token,
            repo_id,
            data_files,
            data_dir,
            storage_options,
            writer_batch_size,
            **config_kwargs
        )

    def _info(self):
        """Return dataset metadata: description plus the config's features."""
        return datasets.DatasetInfo(
            description="SEP-28K dataset",
            features=self.config.features,
        )

    def _split_generators(self, dl_manager):
        """Resolve the data directory, fetching the archive on first use."""
        if dl_manager.manual_dir is None:
            from datasets.config import HF_DATASETS_CACHE

            data_dir = os.path.join(HF_DATASETS_CACHE, "downloads")
            print(f'`data_dir` is None, set the path to {data_dir}')
        else:
            data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))

        # Download/extract only once: the archive unpacks into `clips/`.
        if not os.path.exists(os.path.join(data_dir, 'clips')):
            download_file(
                'https://huggingface.co/datasets/confit/sep-28k/resolve/main/archive.zip',
                dest=os.path.join(data_dir, 'archive.zip'),
                unpack=True,
                write_permissions=True
            )

        # The official release has no train/val/test split; everything goes
        # into a single TRAIN split.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir}),
        ]

    def _generate_examples(self, data_dir):
        """Yield ``(key, example)`` pairs from SEP-28K.

        Raises
        ------
        ValueError
            If the active config name is neither 'sep28k' nor 'fluencybank'
            (previously `metadata_df` was silently left unbound).
        """
        if self.config.name == 'sep28k':
            metadata_df = pd.read_csv(os.path.join(data_dir, 'SEP-28k_labels.csv'))
        elif self.config.name == 'fluencybank':
            metadata_df = pd.read_csv(os.path.join(data_dir, 'fluencybank_labels.csv'))
            # Drop rows the annotators flagged as unsure.
            metadata_df = metadata_df[metadata_df['Unsure'] == 0].reset_index(drop=True)
        else:
            raise ValueError(f"Unknown config name: {self.config.name}")

        # A stutter type counts only when at least `threshold` of the three
        # annotators marked it; CSV column name -> class-label name.
        threshold = 2
        label_columns = [
            ('Block', 'block'),
            ('Prolongation', 'prolongation'),
            ('SoundRep', 'sound_rep'),
            ('WordRep', 'word_rep'),
            ('Interjection', 'interjection'),
        ]

        # Index the per-clip annotations by the clip's file stem
        # ("<Show>_<EpId>_<ClipId>"), which matches the wav file names.
        _mapping = {}
        for _, row in metadata_df.iterrows():
            filename = f"{row['Show']}_{row['EpId']}_{row['ClipId']}"
            stutter = [name for column, name in label_columns if row[column] >= threshold]
            if not stutter:
                stutter = ['no_dysfluencies']
            _mapping[filename] = {
                'start': row['Start'],
                'end': row['Stop'],
                'stutter': stutter,
            }

        extensions = ['.wav']
        _, wav_paths = fast_scandir(data_dir, extensions, recursive=True)

        for guid, wav_path in enumerate(wav_paths):
            fileid = Path(wav_path).stem
            # Clips without a (confident) label row are skipped.  The previous
            # bare `except: continue` also hid every unrelated error; a plain
            # membership test keeps only the intended skip behavior.
            info = _mapping.get(fileid)
            if info is None:
                continue
            # BUG FIX: `start`/`end` used to reference the *loop variables* of
            # the metadata loop above, i.e. the last CSV row's values for every
            # example.  Also dropped the undeclared "id" field, which is not
            # part of `self.config.features`.
            yield guid, {
                "audio": wav_path,
                "stutter": info['stutter'],
                "label": info['stutter'],
                "start": info['start'],
                "end": info['end'],
            }
|
|
|
|
def fast_scandir(path: str, extensions: List[str], recursive: bool = False):
    """Collect sub-directories and matching files below ``path``.

    Arguments
    ---------
    path : str
        Directory to scan.
    extensions : List[str]
        Lower-case file extensions to keep, including the dot (e.g. ['.wav']).
    recursive : bool
        When True, descend into every sub-directory as well.

    Returns
    -------
    tuple
        ``(subfolders, files)`` — lists of paths.  OS errors (permissions,
        entries vanishing mid-scan) are swallowed: this is a best-effort scan.
    """
    found_dirs: List[str] = []
    found_files: List[str] = []

    try:
        for entry in os.scandir(path):
            try:
                if entry.is_dir():
                    found_dirs.append(entry.path)
                    continue
                # Extension comparison lower-cases the file's suffix, so the
                # caller's `extensions` are expected to be lower-case already.
                if entry.is_file() and os.path.splitext(entry.name)[1].lower() in extensions:
                    found_files.append(entry.path)
            except Exception:
                pass  # unreadable entry: skip it
    except Exception:
        pass  # unreadable directory: return what we have so far

    if recursive:
        # Iterate over a snapshot: the recursion appends to `found_dirs`.
        for child_dir in list(found_dirs):
            deeper_dirs, deeper_files = fast_scandir(child_dir, extensions, recursive=recursive)
            found_dirs.extend(deeper_dirs)
            found_files.extend(deeper_files)

    return found_dirs, found_files
|
|
|
|
def download_file(
    source,
    dest,
    unpack=False,
    dest_unpack=None,
    replace_existing=False,
    write_permissions=False,
):
    """Downloads the file from the given source and saves it in the given
    destination path.
    Arguments
    ---------
    source : path or url
        Path of the source file. If the source starts with ``http://`` or
        ``https://`` it is downloaded from the web, otherwise it is copied
        from the local filesystem.
    dest : path
        Destination path.
    unpack : bool
        If True, it unpacks the data in the dest folder.
    dest_unpack: path
        Path where to store the unpacked dataset. Defaults to the folder
        containing ``dest``.
    replace_existing : bool
        If True, replaces the existing files.
    write_permissions: bool
        When set to True, all the files in the dest_unpack directory will be granted write permissions.
        This option is active only when unpack=True.
    """
    class DownloadProgressBar(tqdm):
        """tqdm progress bar usable as an ``urlretrieve`` reporthook."""

        def update_to(self, b=1, bsize=1, tsize=None):
            """Translate urlretrieve's (blocks, block size, total) into tqdm updates."""
            if tsize is not None:
                self.total = tsize
            self.update(b * bsize - self.n)

    dest_dir = Path(dest).resolve().parent
    dest_dir.mkdir(parents=True, exist_ok=True)

    # BUG FIX: the old substring test (`"http" not in source`) misclassified
    # local paths that merely contain "http"; check the URL scheme instead.
    is_url = str(source).startswith(("http://", "https://"))
    if not is_url:
        shutil.copyfile(source, dest)
    elif not os.path.isfile(dest) or replace_existing:
        print(f"Downloading {source} to {dest}")
        with DownloadProgressBar(
            unit="B",
            unit_scale=True,
            miniters=1,
            desc=source.split("/")[-1],
        ) as t:
            urllib.request.urlretrieve(
                source, filename=dest, reporthook=t.update_to
            )
    else:
        print(f"{dest} exists. Skipping download")

    if unpack:
        if dest_unpack is None:
            dest_unpack = os.path.dirname(dest)
        print(f"Extracting {dest} to {dest_unpack}")

        if source.endswith(".tar.gz") or source.endswith(".tgz"):
            # BUG FIX: these are gzipped *tar archives* — the old code only
            # gunzipped them, leaving an unextracted .tar file behind.
            shutil.unpack_archive(dest, dest_unpack, format="gztar")
        elif source.endswith(".gz"):
            # A bare gzip stream: decompress next to the archive.  Strip only
            # the trailing ".gz" (not every occurrence), and never write back
            # onto `dest` itself.
            if dest.endswith(".gz"):
                out = dest[: -len(".gz")]
            else:
                out = dest + ".decompressed"
            with gzip.open(dest, "rb") as f_in:
                with open(out, "wb") as f_out:
                    shutil.copyfileobj(f_in, f_out)
        else:
            shutil.unpack_archive(dest, dest_unpack)
        if write_permissions:
            set_writing_permissions(dest_unpack)
|
|
|
|
def set_writing_permissions(folder_path):
    """
    This function sets user writing permissions to all the files in the given folder.
    Arguments
    ---------
    folder_path : folder
        Folder whose files will be granted write permissions.
    """
    # Walk the tree and chmod regular files only; directories are untouched.
    for current_dir, _dirs, filenames in os.walk(folder_path):
        for entry_name in filenames:
            # 0o666: read/write for user, group and others.
            os.chmod(os.path.join(current_dir, entry_name), 0o666)