# transcribed_calls/prepare_nemo_manifest.py
# Cleans the transcribed_calls dataset and writes a NeMo-style manifest.
# (Originally committed by mattbonnell, commit a641f99, "crop audio files".)
from datasets import Dataset, DatasetDict, IterableDataset, IterableDatasetDict, concatenate_datasets, load_dataset
import re
import soundfile as sf
# Union of every container type `datasets.load_dataset` can return, so the
# helpers below accept any of them (they only rely on `.map` / `.filter`).
TDataset = DatasetDict | Dataset | IterableDatasetDict | IterableDataset
def remove_special_tags(dataset: TDataset, label_column_name: str = 'words', **kwargs) -> TDataset:
    """Strip bracketed annotation tags from the label column and lowercase it.

    Tags look like "[noise] ", "[speaker 1] " or "[tag:word] " — a bracketed
    word, optionally followed by a space+digit and/or ":word", then a space.

    Args:
        dataset: any `datasets` container supporting `.map`.
        label_column_name: name of the column holding the transcript text.
        **kwargs: forwarded verbatim to `.map` (e.g. `num_proc`).

    Returns:
        The mapped dataset with cleaned, lowercased labels.
    """
    # Compiled once and hoisted out of the per-example closure.
    # NOTE(review): the pattern requires a trailing space after "]", so a tag
    # at the very end of a transcript is NOT removed — confirm this is intended.
    special_tag_pattern = re.compile(r'(\[\w+( [0-9])?:?(\w+)?\] )')

    # Renamed from the original, which shadowed the enclosing function's name.
    def _strip_tags(batch):
        batch[label_column_name] = special_tag_pattern.sub('', batch[label_column_name]).lower()
        return batch

    return dataset.map(_strip_tags, **kwargs)
def remove_bad_files(dataset: TDataset, **kwargs) -> TDataset:
    """Filter out examples whose audio file is unreadable, channel-less, or empty.

    Args:
        dataset: any `datasets` container supporting `.filter`; each example
            must carry a 'path' column pointing at an audio file on disk.
        **kwargs: forwarded verbatim to `.filter` (e.g. `num_proc`).

    Returns:
        The filtered dataset containing only readable, non-empty audio files.
    """
    def is_file_good(batch):
        path = batch['path']
        try:
            info = sf.info(path)
        except RuntimeError as err:
            # soundfile raises RuntimeError (LibsndfileError subclasses it) for
            # corrupt/unopenable files; drop such examples instead of letting a
            # single bad file abort the entire filter pass.
            print(f"File {path} could not be read ({err}). Removing it from the dataset.")
            return False
        if info.channels == 0:
            print(f"File {path} has no channels. Removing it from the dataset.")
            return False
        if info.duration == 0:
            print(f"File {path} has duration 0. Removing it from the dataset.")
            return False
        return True

    return dataset.filter(is_file_good, **kwargs)
if __name__ == "__main__":
    # Local import keeps this fix self-contained; stdlib only.
    import json

    print("loading dataset...")
    dataset = load_dataset('./transcribed_calls.py', trust_remote_code=True)
    print('removing bad files...')
    dataset = remove_bad_files(dataset)
    print("removing special tags...")
    dataset = remove_special_tags(dataset)
    print("concatenating train and test datasets...")
    dataset = concatenate_datasets([dataset['train'], dataset['test']])
    print("creating nemo_manifest.json...")
    # json.dumps escapes quotes, backslashes, and newlines in the transcript;
    # the previous hand-rolled f-string emitted malformed JSON for any text
    # containing those characters.
    # NOTE(review): NeMo manifests typically also carry a "duration" field —
    # confirm the downstream consumer tolerates its absence.
    with open('nemo_manifest.json', 'w', encoding='utf-8') as f:
        for example in dataset:
            record = {"audio_filepath": example["path"], "text": example["words"]}
            f.write(json.dumps(record, ensure_ascii=False) + '\n')
    print('nemo_manifest.json created')