# speech_flame_codes/speech_flame_codes.py
# Lint as: python3
"""semantic, acoustic and flame codes dataset.
"""
import glob
import os
import datasets
import torch
class SpeechFlameCodesDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Speech-Flame Codes dataset.

    Currently carries no extra options beyond the base ``BuilderConfig``.
    """

    def __init__(self, **kwargs):
        # Forward everything straight to the base class; this subclass exists
        # only as a named hook for future config options.
        super().__init__(**kwargs)
class SpeechFlameCodesDataset(datasets.GeneratorBasedBuilder):
    """Builder for precomputed semantic, acoustic, and FLAME code sequences.

    Each example is read from one ``.pt`` file in ``data_dir``; the file is
    expected to hold a dict with ``acoustic_codes``, ``semantic_codes`` and
    ``flame_codes`` tensors (see ``_generate_examples``).
    """

    BUILDER_CONFIGS = [
        SpeechFlameCodesDatasetConfig(name="all", description="SpeechFlameCodes dataset"),
    ]

    @property
    def manual_download_instructions(self):
        # Bug fix: the instruction previously read `name=all` (the builtin),
        # which is not what load_dataset expects — it takes the string 'all'.
        return (
            "Codes should be computed before using this dataset. "
            "`datasets.load_dataset('/path/to/this/script', name='all', data_dir='path/to/folder/folder_name/of/codes')`"
        )

    def _info(self):
        """Declare the example schema: id, length, and three token arrays."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "length": datasets.Value("int32"),
                "acoustic_tokens": datasets.Array2D(shape=(None, 12), dtype="int16"),
                "semantic_tokens": datasets.Array2D(shape=(None, 1), dtype="int16"),
                "flame_tokens": datasets.Array2D(shape=(None, 1), dtype="int16"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Collect all ``*.pt`` files from the manual data_dir as one TRAIN split.

        Raises:
            FileNotFoundError: if the user-supplied ``data_dir`` does not exist.
        """
        base_data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir or ""))
        if not os.path.exists(base_data_dir):
            raise FileNotFoundError(
                f"{base_data_dir} does not exist. Make sure you insert a manual dir via "
                f"`datasets.load_dataset('/this/script', data_dir=...)` "
                f"that includes code files .pt files "
                f"dataset. Manual download instructions: {self.manual_download_instructions}"
            )
        train_data_dirs = glob.glob(os.path.join(base_data_dir, "*.pt"), recursive=False)
        # Skip Jupyter checkpoint copies that glob may pick up.
        train_data_dirs = [d for d in train_data_dirs if '.ipynb_checkpoints' not in d]
        return [
            datasets.SplitGenerator(
                name=str(datasets.Split.TRAIN),
                gen_kwargs={"data_dirs": train_data_dirs},
            ),
        ]

    def _generate_examples(self, data_dirs):
        """Yield ``(id, example)`` pairs, one per ``.pt`` code file."""
        for key, path in enumerate(data_dirs):
            # Bug fix: derive the id portably instead of path.split("/"),
            # which breaks on Windows path separators.
            id_ = os.path.splitext(os.path.basename(path))[0]
            # NOTE(review): weights_only=False unpickles arbitrary objects —
            # only load code files produced by a trusted pipeline.
            data = torch.load(path, map_location="cpu", weights_only=False)
            # (codebooks, frames) -> (frames, 12) to match the Array2D schema.
            acoustic_tokens = data["acoustic_codes"].transpose(0, 1)
            semantic_tokens = data["semantic_codes"].unsqueeze(-1)
            flame_tokens = data["flame_codes"].unsqueeze(-1)
            yield id_, {
                "id": id_,
                # Bug fix: "length" is declared in _info() but was never
                # yielded, so feature encoding would fail on the missing key.
                # Assumes length == number of acoustic frames — TODO confirm
                # against the code-extraction pipeline.
                "length": acoustic_tokens.shape[0],
                "acoustic_tokens": acoustic_tokens,
                "semantic_tokens": semantic_tokens,
                "flame_tokens": flame_tokens,
            }