|
|
import os |
|
|
import json |
|
|
import tarfile |
|
|
import datasets |
|
|
|
|
|
class LEMASDataset(datasets.GeneratorBasedBuilder):
    """Loader for the LEMAS multilingual speech dataset.

    On-disk layout (under ``LEMAS-Dataset/train``): one directory per
    language, each containing paired shard files ``<prefix>.jsonl``
    (metadata, one JSON object per line) and ``<prefix>.tar.gz``
    (the audio referenced by the metadata).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Describe the dataset schema: key, audio, duration, transcript,
        and per-segment word-level alignments."""
        return datasets.DatasetInfo(
            description="LEMAS multilingual training dataset with split audio archives",
            features=datasets.Features(
                {
                    "key": datasets.Value("string"),
                    "audio": datasets.Audio(),
                    "dur": datasets.Value("float32"),
                    "txt": datasets.Value("string"),
                    "align": datasets.features.Sequence({
                        "txt": datasets.Value("string"),
                        "words": datasets.features.Sequence({
                            "word": datasets.Value("string"),
                            "start": datasets.Value("float32"),
                            "end": datasets.Value("float32"),
                            "score": datasets.Value("float32"),
                        }),
                    }),
                }
            ),
            supervised_keys=("audio", "txt"),
            homepage="https://your-project-homepage",
            license="Apache-2.0",
        )

    def _split_generators(self, dl_manager):
        """Discover all (jsonl, tar.gz) shard pairs across languages.

        Returns a SINGLE train SplitGenerator carrying the full shard
        list. The previous implementation returned one SplitGenerator per
        shard, all named ``Split.TRAIN`` — the builder rejects duplicate
        split names, so any dataset with more than one shard crashed.
        """
        data_dir = dl_manager.extract("LEMAS-Dataset/train")

        languages = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]

        # (jsonl_path, tar_path) pairs; sorted for deterministic shard order.
        shards = []
        for lang in sorted(languages):
            lang_dir = os.path.join(data_dir, lang)
            jsonl_files = sorted(f for f in os.listdir(lang_dir) if f.endswith(".jsonl"))
            # Set for O(1) membership tests when pairing shards.
            tar_names = {f for f in os.listdir(lang_dir) if f.endswith(".tar.gz")}

            for jsonl_file in jsonl_files:
                tar_file = jsonl_file[: -len(".jsonl")] + ".tar.gz"
                if tar_file not in tar_names:
                    # Metadata shard without its audio archive: skip, as before.
                    continue
                shards.append(
                    (os.path.join(lang_dir, jsonl_file), os.path.join(lang_dir, tar_file))
                )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"shards": shards},
            )
        ]

    def _generate_examples(self, shards):
        """Yield ``(key, example)`` pairs from each shard pair.

        Args:
            shards: list of ``(jsonl_path, audio_archive_path)`` tuples as
                produced by ``_split_generators``.

        Yields:
            Tuples of a unique key (prefixed with the shard index so keys
            cannot collide across shards) and the example dict matching
            the features declared in ``_info``.

        Raises:
            FileNotFoundError: if a metadata row references an audio file
                missing from its archive.
        """
        for shard_idx, (jsonl_path, audio_archive_path) in enumerate(shards):
            # ``with`` guarantees the archive is closed even if generation
            # is interrupted by an exception (the old code leaked it).
            with tarfile.open(audio_archive_path, "r:gz") as archive:
                # Index members once; per-row extractfile() is then O(1) lookup.
                members = {m.name: m for m in archive.getmembers()}

                def get_audio_bytes(path_in_archive):
                    # Fail loudly on a metadata/audio mismatch rather than
                    # silently dropping rows.
                    if path_in_archive not in members:
                        raise FileNotFoundError(f"Audio {path_in_archive} not found in archive {audio_archive_path}")
                    f = archive.extractfile(members[path_in_archive])
                    if f is None:
                        raise FileNotFoundError(f"Cannot extract {path_in_archive} from {audio_archive_path}")
                    return f.read()

                with open(jsonl_path, "r", encoding="utf-8") as f:
                    for line in f:
                        line = line.strip()
                        if not line:
                            # Tolerate blank/trailing lines in the jsonl.
                            continue
                        example = json.loads(line)
                        audio_bytes = get_audio_bytes(example["audio"])
                        yield f"{shard_idx}/{example['key']}", {
                            "key": example["key"],
                            "audio": {"bytes": audio_bytes, "path": example["audio"]},
                            "dur": example["dur"],
                            "txt": example["txt"],
                            "align": example["align"],
                        }
|
|
|