code
stringlengths
17
6.64M
def run_cli():
    """Launch the Lightning CLI with the callback that saves the run config.

    NOTE(review): 'SaveConfigCallbackWanb' looks like a typo for
    'SaveConfigCallbackWandb' in the project-defined class name — confirm.
    """
    _ = LightningCLI(save_config_callback=SaveConfigCallbackWanb)
def main():
    """Training entrypoint: load environment variables, then start the CLI."""
    load_dotenv()
    run_cli()
def dataset():
    """CLI entrypoint for the dataset preparation script.

    Builds a datamodule from a YAML config via jsonargparse subclass
    resolution, then dispatches to exactly one action: archiving, uploading,
    feature preprocessing, verification, or dataset preparation (default).
    """
    load_dotenv()
    parser = ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('config', type=str, help='Path to a config file with arguments.')
    parser.add_argument(
        '--preprocess',
        action='store_true',
        help='Whether to download and preprocess the raw dataset.',
    )
    parser.add_argument(
        '--preprocess_features',
        action='store_true',
        help='Preprocess features on an already downloaded dataset.',
    )
    parser.add_argument(
        '--archive',
        type=str,
        default=None,
        help='If set, will archive the preprocessed dataset into the given path.',
    )
    parser.add_argument(
        '--upload',
        type=str,
        default=None,
        help='If set, will upload an archived dataset to the KickDataset bucket',
    )
    parser.add_argument(
        '--verify',
        action='store_true',
        help='If set, will check that all dataset files are present.',
    )
    args = parser.parse_args(sys.argv[1:])

    # Instantiate the datamodule from the YAML config. The config file's
    # contents are nested under the 'datamodule' key for subclass parsing.
    datamodule_parser = ArgumentParser()
    datamodule_parser.add_subclass_arguments(AudioDataModule, 'datamodule')
    if args.config is not None:
        with open(args.config, 'r') as f:
            config = yaml.safe_load(f)
        config = {'datamodule': config}
    datamodule_args = datamodule_parser.parse_object(config)
    datamodule = datamodule_parser.instantiate_classes(datamodule_args).datamodule

    # Actions are mutually exclusive; the first matching flag wins.
    if args.archive is not None:
        datamodule.archive_dataset(args.archive)
        return
    if args.upload is not None:
        data_utils.upload_file_r2(args.upload, datamodule.url, datamodule.bucket)
        return
    if args.preprocess_features:
        datamodule.preprocess_features(overwrite=True)
        return
    if args.verify:
        verify_dataset(datamodule)
        return
    datamodule.prepare_data(use_preprocessed=not args.preprocess)
def verify_dataset(datamodule: LightningDataModule):
    """Verify that every item in each split of the dataset can be loaded."""
    for stage in ('fit', 'validate', 'test'):
        datamodule.setup(stage)
        if stage == 'fit':
            ds = datamodule.train_dataloader().dataset
        elif stage == 'validate':
            ds = datamodule.val_dataloader().dataset
        else:
            ds = datamodule.test_dataloader().dataset
        # Touch every item so missing or corrupt files raise immediately.
        for idx in tqdm(range(len(ds))):
            _ = ds[idx]
def inference():
    """
    Given an input audio, compute reconstruction.

    Loads a model from a config/checkpoint pair, conforms the input audio to
    the model's expected sample rate and length, runs CQT modal analysis to
    build the conditioning tensor, and writes the reconstruction to disk.
    """
    load_dotenv()
    parser = ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('config', type=str, help='Path to a config file with arguments.')
    parser.add_argument('checkpoint', type=str, help='Path to a checkpoint file.')
    parser.add_argument('input', type=str, help='Path to input audio file')
    parser.add_argument('output', type=str, help='Path to save audio to')
    args = parser.parse_args(sys.argv[1:])

    model, _ = model_utils.load_model(args.config, args.checkpoint)

    audio, input_sr = torchaudio.load(args.input)
    audio = audio[:1]  # keep only the first channel

    data_config = model_utils.load_config_yaml(args.config)['data']['init_args']
    sample_rate = data_config['sample_rate']
    if input_sr != sample_rate:
        resampler = torchaudio.transforms.Resample(orig_freq=input_sr, new_freq=sample_rate)
        audio = resampler(audio)
    if audio.shape[1] < data_config['num_samples']:
        shortfall = data_config['num_samples'] - audio.shape[1]
        audio = torch.nn.functional.pad(audio, (0, shortfall))

    # Forward only the data-config keys that CQTModalAnalysis accepts.
    cqt_args = inspect.getfullargspec(CQTModalAnalysis.__init__).args
    cqt_kwargs = {key: data_config[key] for key in cqt_args if key in data_config}
    modal = CQTModalAnalysis(**cqt_kwargs)

    modal_freqs, modal_amps, modal_phases = modal(audio)
    modal_freqs = 2 * torch.pi * modal_freqs / sample_rate  # Hz -> rad/sample
    modal_tensor = torch.stack([modal_freqs, modal_amps, modal_phases])
    modal_tensor = rearrange(modal_tensor, 's 1 m f -> 1 s m f')

    y_hat = model(audio.unsqueeze(0), modal_tensor)
    torchaudio.save(args.output, y_hat.squeeze(0), sample_rate)
class AudioDataset(Dataset):
    """
    Dataset of audio files.

    Args:
        data_dir: Path to the directory containing the dataset.
        meta_file: Name of the json metadata file.
        sample_rate: Expected sample rate of the audio files.
        num_samples: Expected number of samples in the audio files.
        split (optional): Split to return. Must be one of 'train', 'val', or 'test'.
            If None, the entire dataset is returned.
        seed: Seed for random number generator used to split the dataset.
        split_strategy: 'sample_pack' keeps sample packs disjoint across splits;
            'random' splits files uniformly at random.
        normalize: Whether to peak-normalize each waveform on load.
        sample_types: If given, restrict the dataset to these sample types
            (only supported with the 'sample_pack' strategy).
        instruments: If given, restrict the dataset to these instruments
            (only supported with the 'sample_pack' strategy).
    """

    def __init__(
        self,
        data_dir: Union[str, Path],
        meta_file: str,
        sample_rate: int,
        num_samples: int,
        split: Optional[str] = None,
        seed: int = 42,
        split_strategy: Literal['sample_pack', 'random'] = 'random',
        normalize: bool = False,
        sample_types: Optional[List[str]] = None,
        instruments: Optional[List[str]] = None,
    ):
        super().__init__()
        self.data_dir = Path(data_dir)
        self.meta_file = meta_file
        self.sample_rate = sample_rate
        self.num_samples = num_samples
        self.seed = seed
        self.normalize = normalize
        self.sample_types = sample_types
        self.instruments = instruments

        if not self.data_dir.exists():
            raise FileNotFoundError(f'Preprocessed dataset not found. Expected: {self.data_dir}')

        with open(self.data_dir.joinpath(self.meta_file), 'r') as f:
            self.metadata = json.load(f)
        self.file_list = list(self.metadata.keys())

        if split is not None:
            if split_strategy == 'sample_pack':
                self._sample_pack_split(split)
            elif split_strategy == 'random':
                self._random_split(split)
            else:
                raise ValueError("Invalid split strategy. Expected one of 'sample_pack' or 'random'.")

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, idx) -> Tuple[torch.Tensor]:
        audio_filename = self.metadata[self.file_list[idx]]['filename']
        waveform, sample_rate = torchaudio.load(self.data_dir.joinpath(audio_filename))
        assert sample_rate == self.sample_rate, 'Sample rate mismatch.'
        assert waveform.shape == (1, self.num_samples), 'Incorrect input audio shape.'
        if self.normalize:
            # Peak normalization; assumes the clip is not all-zero.
            waveform = waveform / waveform.abs().max()
        return (waveform,)

    def _sample_pack_split(self, split: str, test_size: float = 0.1, val_size: float = 0.1):
        # Replace the full file list with only the keys belonging to `split`.
        split_metadata = self._sample_pack_split_metadata(split, test_size, val_size)
        self.file_list = split_metadata.index.tolist()
        log.info(f'Number of samples in {split} set: {len(self.file_list)}')

    def _sample_pack_split_metadata(self, split: str, test_size: float = 0.1, val_size: float = 0.1):
        """
        Split the dataset into train, validation, and test sets. This creates splits
        that are disjoint with respect to sample packs and have the same proportion of
        sample types. It performs a greedy assignment of samples to splits, starting
        with the test set, then the validation set, and finally the training set.

        Args:
            split: Split to return. Must be one of 'train', 'val', or 'test'.
        """
        if split not in ['train', 'val', 'test']:
            raise ValueError("Invalid split. Must be one of 'train', 'val', or 'test'.")

        data = pd.DataFrame.from_dict(self.metadata, orient='index')
        data_types = data.groupby('type').size().reset_index(name='counts')
        if self.sample_types is not None:
            data_types = data_types[data_types['type'].isin(self.sample_types)]
            log.info(f'Filtering by sample types: {self.sample_types}')

        for t in data_types.iterrows():
            num_samples = t[1]['counts']
            sample_type = t[1]['type']
            # Shuffle the sample packs of this type, then greedily assign whole
            # packs to test/val until each split's sample budget is exhausted.
            sample_packs = (
                data[data['type'] == sample_type]
                .groupby('sample_pack_key')
                .size()
                .reset_index(name='counts')
                .sample(frac=1, random_state=self.seed)
            )
            sample_packs['split'] = 'train'
            for s, n in zip(('test', 'val'), (test_size, val_size)):
                split_samples = int(num_samples * n)
                for i, row in sample_packs.iterrows():
                    if row['counts'] <= split_samples and row['split'] == 'train':
                        split_samples -= row['counts']
                        sample_packs.loc[i, 'split'] = s
            for i, row in sample_packs.iterrows():
                data.loc[data['sample_pack_key'] == row['sample_pack_key'], 'split'] = row['split']

        splits = data.groupby('split').size().reset_index(name='counts')
        splits['percent'] = splits['counts'] / splits['counts'].sum()
        log.info(f'Split counts: {splits}')

        if 'instrument' in data.columns:
            log.info(f"Instruments in dataset: {data['instrument'].unique()}")
        if self.instruments is not None:
            log.info(f'Filtering by instruments: {self.instruments}')
            data = data[data['instrument'].isin(self.instruments)]

        data = data[data['split'] == split]
        data_types = data.groupby('type').size().reset_index(name='counts')
        log.info(f'Number of samples by type: {data_types}')
        if 'instrument' in data.columns:
            inst_types = data.groupby('instrument').size().reset_index(name='counts')
            log.info(f'Number of samples by instrument: {inst_types}')
        return data

    def _random_split(self, split: str):
        """
        Split the dataset into train, validation, and test sets.

        Args:
            split: Split to return. Must be one of 'train', 'val', or 'test'.
        """
        if self.sample_types is not None:
            raise NotImplementedError('Cannot use sample types with random split. Use sample_pack split.')
        if split not in ['train', 'val', 'test']:
            raise ValueError("Invalid split. Must be one of 'train', 'val', or 'test'.")
        # 80/10/10 split, deterministic given the seed.
        splits = random_split(
            self.file_list,
            [0.8, 0.1, 0.1],
            generator=torch.Generator().manual_seed(self.seed),
        )
        if split == 'train':
            self.file_list = splits[0]
        elif split == 'val':
            self.file_list = splits[1]
        elif split == 'test':
            self.file_list = splits[2]
class AudioWithParametersDataset(AudioDataset):
    """
    Dataset of audio pairs with an additional parameter tensor

    Args:
        data_dir: Path to the directory containing the dataset.
        meta_file: Name of the json metadata file.
        sample_rate: Expected sample rate of the audio files.
        num_samples: Expected number of samples in the audio files.
        parameter_key: Key in the metadata file for the feature file.
        expected_num_modes: If set, zero-pad the mode dimension of the loaded
            feature tensor up to this many modes.
        **kwargs: Additional arguments to pass to AudioDataset.
    """

    def __init__(
        self,
        data_dir: Union[str, Path],
        meta_file: str,
        sample_rate: int,
        num_samples: int,
        parameter_key: str,
        expected_num_modes: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            data_dir=data_dir,
            meta_file=meta_file,
            sample_rate=sample_rate,
            num_samples=num_samples,
            **kwargs,
        )
        self.parameter_key = parameter_key
        self.expected_num_modes = expected_num_modes

    def __getitem__(self, idx):
        (waveform_a,) = super().__getitem__(idx)
        feature_file = self.metadata[self.file_list[idx]][self.parameter_key]
        feature = torch.load(self.data_dir.joinpath(feature_file))
        if self.expected_num_modes is not None and feature.shape[1] != self.expected_num_modes:
            # Pad the mode dimension with zeros up to the expected count.
            null_features = torch.zeros(
                (feature.shape[0], self.expected_num_modes - feature.shape[1], feature.shape[2])
            )
            feature = torch.cat((feature, null_features), dim=1)
        return (waveform_a, feature)
class AudioDataModule(pl.LightningDataModule):
    """
    LightningDataModule for the audio dataset. This class is responsible for downloading
    and extracting a preprocessed dataset, or downloading and preprocessing the
    raw audio files if the preprocessed dataset is not available.

    Args:
        batch_size: Batch size, defaults to 32
        num_workers: Number of workers, defaults to 0
        dataset_class: Dataset class, defaults to AudioDataset
        dataset_kwargs: Additional keyword arguments to pass to the dataset class
            constructor, defaults to None
        url: URL to download the dataset from
        bucket: R2 bucket to download the dataset from
        archive: Gzip archive containing the dataset
        meta_file: JSON file containing metadata about the dataset
        data_dir: Directory to extract the dataset to
        data_dir_unprocessed: Directory to extract the unprocessed dataset to
        sample_rate: Sample rate of the audio files, defaults to 48000
        num_samples: Number of samples to load from each audio file, defaults to
            48000 * 2
    """

    def __init__(
        self,
        batch_size: int = 32,
        num_workers: int = 0,
        dataset_class: Type[AudioDataset] = AudioDataset,
        dataset_kwargs: Optional[Dict] = None,
        url='https://d5d740b2d880827ae0c8f465bf180715.r2.cloudflarestorage.com',
        bucket='drum-dataset',
        archive='k2k-dataset-audio-only-v0.tar.gz',
        meta_file='kick-drums.json',
        data_dir='dataset/audio-only',
        data_dir_unprocessed='dataset-unprocessed/audio-only',
        sample_rate=48000,
        num_samples=48000 * 2,
    ):
        super().__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.dataset_cls = dataset_class
        self.dataset_kwargs = dataset_kwargs or {}
        self.url = url
        self.bucket = bucket
        self.archive = archive
        self.meta_file = meta_file
        self.data_dir = Path(data_dir)
        self.data_dir_unprocessed = Path(data_dir_unprocessed)
        self.sample_rate = sample_rate
        self.num_samples = num_samples

    def prepare_data(self, use_preprocessed: bool = True) -> None:
        """
        Download and extract the dataset.

        Args:
            use_preprocessed: Whether to use preprocessed data, defaults to True. If
                False, the raw data will be downloaded and processed. This will only
                need to be set to False if the preprocessed data archive isn't available
        """
        if use_preprocessed:
            if not Path(self.data_dir).exists():
                # Download the archive only if it isn't already on disk.
                if not Path(self.archive).exists():
                    log.info('Downloading processed dataset...')
                    data_utils.download_file_r2(self.archive, self.url, self.bucket)
                log.info('Extracting processed dataset...')
                extract_archive(self.archive, self.data_dir)
            else:
                log.info('Dataset already exists.')
        else:
            if Path(self.data_dir).exists():
                raise RuntimeError('Preprocessed dataset already exists. Remove it to reprocess.')
            if not Path(self.data_dir_unprocessed).exists():
                log.info('Downloading unprocessed dataset...')
                data_utils.download_full_dataset(
                    self.url, self.bucket, self.meta_file, self.data_dir_unprocessed
                )
            log.info('Processing unprocessed dataset...')
            self.preprocess_dataset()

    def setup(self, stage: str):
        """
        Assign train/val/test datasets for use in dataloaders.

        Args:
            stage: Current stage (fit, validate, test)
        """
        args = [self.data_dir, self.meta_file, self.sample_rate, self.num_samples]
        if stage == 'fit':
            self.train_dataset = self.dataset_cls(*args, split='train', **self.dataset_kwargs)
            self.val_dataset = self.dataset_cls(*args, split='val', **self.dataset_kwargs)
        elif stage == 'validate':
            self.val_dataset = self.dataset_cls(*args, split='val', **self.dataset_kwargs)
        elif stage == 'test':
            self.test_dataset = self.dataset_cls(*args, split='test', **self.dataset_kwargs)

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=True,
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=False,
        )

    def test_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=False,
        )

    def preprocess_dataset(self) -> None:
        """
        Preprocess the dataset: resample/trim every raw wav file into the
        processed audio directory and write a fresh metadata file keyed by a
        hash of each source path.
        """
        log.info('Preprocessing dataset...')
        audio_dir = Path(self.data_dir).joinpath('audio')
        audio_dir.mkdir(parents=True, exist_ok=False)
        meta_file = Path(self.data_dir_unprocessed).joinpath(self.meta_file)
        with open(meta_file, 'r') as f:
            metadata = json.load(f)

        new_metadata = {}
        for key, item in tqdm(metadata.items()):
            sample_type = item['type']
            folders = item['folders']
            files, file_metadata = data_utils.get_files_from_folders(
                self.data_dir_unprocessed, folders, '*.wav'
            )
            assert len(files) > 0, f'No files found in folders: {folders}'
            if len(file_metadata) == 0:
                file_metadata = [{}] * len(files)
            assert len(files) == len(file_metadata), 'File and metadata length mismatch'

            for file, file_meta in zip(files, file_metadata):
                # Stable output name derived from the source path (minus root).
                output_hash = data_utils.str2int(str(Path(*file.parts[1:])))
                output_file = Path(audio_dir).joinpath(f'{output_hash}.wav')
                try:
                    audio_utils.preprocess_audio_file(
                        file, output_file, self.sample_rate, self.num_samples
                    )
                except ValueError as e:
                    log.warning(f'Error processing file {file}: {e}. Skipping...')
                    continue
                new_metadata[output_hash] = {
                    'filename': str(output_file.relative_to(self.data_dir)),
                    'type': sample_type,
                    'sample_pack_key': key,
                    **file_meta,
                }

        with open(Path(self.data_dir).joinpath(self.meta_file), 'w') as f:
            json.dump(new_metadata, f)

    def archive_dataset(self, archive_name: str) -> None:
        """
        Archive the dataset.

        Args:
            archive_name: Name of the archive.
        """
        log.info('Creating a tarfile of the dataset')
        data_utils.create_tarfile(archive_name, self.data_dir)
class ModalDataModule(AudioDataModule):
    """
    DataModule for the modal audio dataset. In addition to the original audio
    waveform, this also contains a synthesized waveform containing only the modal
    components, extracted from the original waveform using sinusoidal modeling.

    Dataset items are returned as pairs of (original, modal) waveforms.

    Args:
        batch_size: Batch size, defaults to 8
        num_workers: Number of workers, defaults to 0
        dataset_class: Dataset class, defaults to AudioDataset
        dataset_kwargs: Additional keyword arguments to pass to the dataset class
            constructor, defaults to None
        url: URL to download the dataset from
        bucket: R2 bucket to download the dataset from
        archive: Gzip archive containing the dataset
        meta_file: JSON file containing metadata about the dataset
        data_dir: Directory to extract the dataset to
        data_dir_unprocessed: Directory to extract the unprocessed dataset to
        sample_rate: Sample rate of the audio files, defaults to 48000
        num_samples: Number of samples to load from each audio file, defaults to
            48000 * 2
        num_modes: Number of modes to extract from the original waveform, modes are
            extracted using sinusoidal modeling and are sorted by their amplitude
            in descending order, defaults to 1
        min_length: Minimum length of the extracted modes in frames, defaults to 10
        threshold: Threshold for the amplitude (in dB) of the extracted sinusoids
            to be considered, defaults to -80.0
        hop_length: Hop length for the CQT used in sinusoidal modelling, defaults to 64
        fmin: Minimum frequency for the CQT used in sinusoidal modelling, defaults to 20
        n_bins: Number of bins for the CQT used in sinusoidal modelling, defaults to 96
        bins_per_octave: Number of bins per octave for the CQT used in sinusoidal
            modelling, defaults to 12
        diff_threshold: Maximum difference in percent to consider two frequencies
            to be part of the same sinusoidal track.
        save_modal_audio: Whether to save the modal audio files, defaults to True
    """

    def __init__(
        self,
        batch_size: int = 8,
        num_workers: int = 0,
        dataset_class: Type[AudioDataset] = AudioDataset,
        dataset_kwargs: Optional[Dict] = None,
        url='https://d5d740b2d880827ae0c8f465bf180715.r2.cloudflarestorage.com',
        bucket='drum-dataset',
        archive='k2k-dataset-audio-only-v0.tar.gz',
        meta_file='kick-drums.json',
        data_dir='dataset/modal',
        data_dir_unprocessed='dataset-unprocessed/modal',
        sample_rate=48000,
        num_samples=48000 * 2,
        num_modes=1,
        min_length=10,
        threshold=-80.0,
        hop_length=64,
        fmin=20,
        n_bins=96,
        bins_per_octave=12,
        diff_threshold=5.0,
        save_modal_audio=True,
    ):
        super().__init__(
            batch_size=batch_size,
            num_workers=num_workers,
            dataset_class=dataset_class,
            dataset_kwargs=dataset_kwargs,
            url=url,
            bucket=bucket,
            archive=archive,
            meta_file=meta_file,
            data_dir=data_dir,
            data_dir_unprocessed=data_dir_unprocessed,
            sample_rate=sample_rate,
            num_samples=num_samples,
        )
        self.num_modes = num_modes
        self.min_length = min_length
        self.threshold = threshold
        self.hop_length = hop_length
        self.fmin = fmin
        self.n_bins = n_bins
        self.bins_per_octave = bins_per_octave
        self.diff_threshold = diff_threshold
        self.save_modal_audio = save_modal_audio

    def preprocess_dataset(self) -> None:
        """
        Overwrite the parent method to add modal features to the dataset.
        First preprocesses the dataset as normal, then extracts the modal
        features and saves them alongside the audio files.
        """
        super().preprocess_dataset()
        log.info('Extracting modal features...')
        with open(Path(self.data_dir).joinpath(self.meta_file), 'r') as f:
            metadata = json.load(f)

        feature_dir = Path(self.data_dir).joinpath('features')
        feature_dir.mkdir(parents=True, exist_ok=False)
        modal = CQTModalAnalysis(
            self.sample_rate,
            hop_length=self.hop_length,
            fmin=self.fmin,
            n_bins=self.n_bins,
            bins_per_octave=self.bins_per_octave,
            min_length=self.min_length,
            num_modes=self.num_modes,
            threshold=self.threshold,
            diff_threshold=self.diff_threshold,
        )

        keys_to_remove = []
        for key, item in tqdm(metadata.items()):
            audio_file = Path(self.data_dir).joinpath(item['filename'])
            waveform, _ = torchaudio.load(audio_file)
            try:
                modal_freqs, modal_amps, modal_phases = modal(waveform)
            except RuntimeError:
                # Drop items whose modal analysis fails.
                log.warning(f'Failed to extract modal features for {audio_file}')
                keys_to_remove.append(key)
                continue

            modal_freqs = 2 * torch.pi * modal_freqs / self.sample_rate  # Hz -> rad/sample
            modal_tensor = torch.stack([modal_freqs, modal_amps, modal_phases])
            modal_tensor = rearrange(modal_tensor, 's 1 m f -> s m f')
            modal_file = feature_dir.joinpath(audio_file.name.replace('.wav', '.pt'))
            torch.save(modal_tensor, modal_file)

            if self.save_modal_audio:
                modal_audio = modal_synth(modal_freqs, modal_amps, self.num_samples, modal_phases)
                modal_audio_file = audio_file.parent.joinpath(
                    audio_file.name.replace('.wav', '_modal.wav')
                )
                torchaudio.save(modal_audio_file, modal_audio, self.sample_rate)
                metadata[key]['filename_modal'] = str(modal_audio_file.relative_to(self.data_dir))
            metadata[key]['feature_file'] = str(modal_file.relative_to(self.data_dir))

        for key in keys_to_remove:
            metadata.pop(key)

        with open(Path(self.data_dir).joinpath(self.meta_file), 'w') as f:
            json.dump(metadata, f)
class FirstOrderDifferenceLoss(torch.nn.Module):
    """L1 loss on the first-order difference of prediction and target.

    Differencing acts as a crude high-pass filter applied to both signals
    before the loss, which may help emphasize transient components.

    Args:
        reduction (str): Reduction mode forwarded to the underlying L1 loss.
            Defaults to "mean".
    """

    def __init__(self, reduction: str = 'mean'):
        super().__init__()
        self.loss = torch.nn.L1Loss(reduction=reduction)

    def forward(self, pred, target):
        # Compare the sample-to-sample changes rather than the raw values.
        return self.loss(torch.diff(pred), torch.diff(target))
class WeightedLoss(torch.nn.Module):
    """
    Combines multiple loss functions into a single weighted sum.

    Args:
        losses: A list of loss functions.
        weights: A list of weights for each loss function. Defaults to None,
            which results in equal weighting of all loss functions.
    """

    def __init__(self, loss_fns: List[Union[Callable, torch.nn.Module]], weights: Optional[List[float]] = None):
        super().__init__()
        self.loss_fns = loss_fns
        if weights is None:
            # Equal weighting when no weights are supplied.
            weights = [1.0] * len(loss_fns)
        else:
            assert len(loss_fns) == len(weights), 'Number of losses and weights must match.'
        self.weights = weights

    def forward(self, *args, **kwargs):
        total = 0
        for fn, weight in zip(self.loss_fns, self.weights):
            total = total + weight * fn(*args, **kwargs)
        return total
class LogSpectralDistance(Metric):
    """
    Log Spectral Distance (LSD) metric.

    Implementation based on https://arxiv.org/abs/1909.06628
    """

    full_state_update = False

    # NOTE(review): n_fft=8092 is not a power of two and looks like a typo for
    # 8192 — kept as-is for backward compatibility; confirm with the author.
    def __init__(self, n_fft=8092, hop_size=64, eps: float = 1e-08, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.add_state('lsd', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.n_fft = n_fft
        self.hop_size = hop_size
        self.eps = eps

    def _log_spectral_power_mag(self, x: torch.Tensor) -> torch.Tensor:
        """Log power-magnitude spectrogram of a batch of mono signals."""
        spec = torch.stft(
            x,
            n_fft=self.n_fft,
            hop_length=self.hop_size,
            window=torch.hann_window(self.n_fft, device=x.device),
            return_complex=True,
        )
        return torch.log(torch.square(torch.abs(spec)) + self.eps)

    def update(self, x: torch.Tensor, y: torch.Tensor) -> None:
        assert x.shape == y.shape
        assert x.ndim == 3 and x.shape[1] == 1, 'Only mono audio is supported'
        log_x = self._log_spectral_power_mag(x.squeeze(1))
        log_y = self._log_spectral_power_mag(y.squeeze(1))
        # RMS over frequency bins, then mean over time frames.
        per_frame = torch.sqrt(torch.mean(torch.square(log_x - log_y), dim=-2))
        batch_lsd = torch.mean(per_frame, dim=-1)
        self.lsd += torch.sum(batch_lsd)
        self.count += batch_lsd.shape[0]

    def compute(self) -> torch.Tensor:
        return self.lsd / self.count
class MFCCError(Metric):
    """
    MFCC Error: mean absolute difference between the MFCCs of two audio batches.
    """

    full_state_update = False

    def __init__(self, sample_rate: int = 48000, n_mfcc: int = 40, n_fft: int = 2048,
                 hop_length: int = 128, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.add_state('mfcc', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.sample_rate = sample_rate
        self.n_mfcc = n_mfcc
        self.n_fft = n_fft
        self.hop_length = hop_length

    def update(self, x: torch.Tensor, y: torch.Tensor) -> None:
        assert x.shape == y.shape
        assert x.ndim == 3 and x.shape[1] == 1, 'Only mono audio is supported'
        # The MFCC transform is applied on CPU; move inputs there first.
        mono_x = x.squeeze(1).cpu()
        mono_y = y.squeeze(1).cpu()
        mfcc = torchaudio.transforms.MFCC(
            sample_rate=self.sample_rate,
            n_mfcc=self.n_mfcc,
            melkwargs={'n_fft': self.n_fft, 'hop_length': self.hop_length},
        )
        mae = torch.mean(torch.abs(mfcc(mono_x) - mfcc(mono_y)), dim=-1)
        self.mfcc += torch.sum(mae)
        self.count += mae.shape[0]

    def compute(self) -> torch.Tensor:
        return self.mfcc / self.count
class SpectralFluxOnsetError(Metric):
    """
    Error between spectral flux onset signals of prediction and target.
    """

    full_state_update = False

    def __init__(self, n_fft=1024, hop_size=64, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.add_state('error', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.n_fft = n_fft
        self.hop_size = hop_size

    def _onset_signal(self, x: torch.Tensor) -> torch.Tensor:
        """Half-wave-rectified, squared spectral flux summed per frame."""
        assert x.dim() == 3, 'Input must be of shape (batch, channels, length)'
        assert x.shape[1] == 1, 'Input must be mono'
        spec = torch.stft(
            x.squeeze(1),
            n_fft=self.n_fft,
            hop_length=self.hop_size,
            window=torch.hann_window(self.n_fft, device=x.device),
            return_complex=True,
            pad_mode='constant',
            normalized=False,
            onesided=True,
        )
        # NOTE(review): diff and sum run over dim=1 (the frequency axis of the
        # STFT output); classic spectral flux differences across time frames —
        # confirm this axis choice is intentional.
        flux = torch.diff(torch.abs(spec), dim=1)
        flux = (flux + torch.abs(flux)) / 2  # half-wave rectification
        flux = torch.square(flux)
        return torch.sum(flux, dim=1)

    def update(self, x: torch.Tensor, y: torch.Tensor) -> None:
        assert x.shape == y.shape
        assert x.ndim == 3 and x.shape[1] == 1, 'Only mono audio is supported'
        onsets_x = self._onset_signal(x)
        onsets_y = self._onset_signal(y)
        onset_error = torch.mean(torch.abs(onsets_x - onsets_y), dim=-1)
        self.error += torch.sum(onset_error)
        self.count += onset_error.shape[0]

    def compute(self) -> torch.Tensor:
        return self.error / self.count
class Pad(nn.Module):
    """Zero-pad a tensor according to a causal or non-causal 1D padding scheme.

    Args:
        kernel_size (int): Size of the convolution kernel.
        dilation (int): Dilation factor.
        causal (bool, optional): If True, all padding goes on the left (past)
            side; otherwise it is split evenly between both sides.
            Defaults to True.
    """

    def __init__(self, kernel_size: int, dilation: int, causal: bool = True):
        super().__init__()
        total = dilation * (kernel_size - 1)
        # Causal: everything on the left; non-causal: split evenly (floor).
        self.padding = (total, 0) if causal else (total // 2, total // 2)

    def forward(self, x):
        return nn.functional.pad(x, self.padding)
class FiLM(nn.Module):
    """Feature-wise Linear Modulation layer. Takes an embedding -- usually shared
    between layers -- and applies a linear transformation to get the affine
    parameters of the FiLM transformation.

    Args:
        film_embedding_size (int): Size of the FiLM embedding.
        input_channels (int): Number of input channels.
        use_batch_norm (bool, optional): Whether to use batch normalization.
            Defaults to True.
    """

    def __init__(self, film_embedding_size: int, input_channels: int, use_batch_norm: bool = True):
        super().__init__()
        self.use_batch_norm = use_batch_norm
        if self.use_batch_norm:
            # Affine-free norm: scale/shift come from the FiLM parameters.
            self.norm = nn.BatchNorm1d(input_channels, affine=False)
        self.net = nn.Linear(film_embedding_size, input_channels * 2)

    def forward(self, x: torch.Tensor, film_embedding: torch.Tensor):
        gamma, beta = self.net(film_embedding).chunk(2, dim=-1)
        if self.use_batch_norm:
            x = self.norm(x)
        # Broadcast the per-channel affine parameters over the time axis.
        return gamma[..., None] * x + beta[..., None]
class TFiLM(nn.Module):
    """Temporal Feature-wise Linear Modulation layer. Derives affine parameters
    from a decimated (max-pooled) version of the input signal via an LSTM, and
    applies them block-wise to the input. Allows the model to learn longer-range
    temporal dependencies.
    """

    def __init__(self, channels: int, block_size: int):
        super().__init__()
        self.block_size = block_size
        self.pool = nn.MaxPool1d(block_size)
        self.lstm = nn.LSTM(input_size=channels, hidden_size=channels, num_layers=1)
        self.proj = nn.Linear(channels, channels * 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        length = x.shape[-1]
        n_blocks = length // self.block_size
        assert n_blocks > 0, 'Input length must be greater than block size.'
        assert length == n_blocks * self.block_size, 'Input length must be divisible by block size.'
        # One pooled summary frame per block, fed through the LSTM time-major:
        # (b, c, n_blocks) -> (n_blocks, b, c).
        decimated = self.pool(x).permute(2, 0, 1)
        affine, _ = self.lstm(decimated)
        # (t, b, 2c) -> (b, 2c, t, 1) so gamma/beta broadcast over block samples.
        affine = self.proj(affine).permute(1, 2, 0).unsqueeze(-1)
        gamma, beta = affine.chunk(2, dim=1)
        # Apply each block's affine transform to every sample in the block.
        blocks = x.reshape(*x.shape[:-1], n_blocks, self.block_size)
        modulated = gamma * blocks + beta
        return modulated.flatten(-2)
class GatedActivation(nn.Module):
    """Gated activation function for 1D convolutional networks. Expects input of
    shape (batch_size, channels * 2, time); the first half of the channels is
    passed through tanh and gated by a sigmoid of the second half.
    """

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        signal, gate = x.chunk(2, dim=-2)
        assert signal.shape[-2] == gate.shape[-2], 'Input channels must be divisible by 2.'
        return torch.tanh(signal) * torch.sigmoid(gate)
class AttentionPooling(nn.Module): def __init__(self, in_features: int, keep_seq_dim: bool=False): super().__init__() self.norm = nn.LayerNorm(in_features) self.query = nn.Parameter(torch.zeros(1, 1, in_features)) self.attn = nn.MultiheadAttention(in_features, 1, bias=False) self.keep_seq_dim = keep_seq_dim def forward(self, x: torch.Tensor) -> torch.Tensor: 'Expects shape (batch_size, channels, time)' x = rearrange(x, 'b c t -> t b c') x = self.norm(x) q = repeat(self.query, '() () c -> () b c', b=x.shape[1]) (attn, _) = self.attn(q, x, x, need_weights=False) if self.keep_seq_dim: attn = rearrange(attn, 't b c -> b c t') else: attn = attn.squeeze(dim=0) return attn
class _SoundStreamResidualUnit(nn.Module):
    """SoundStream residual unit: dilated conv + pointwise conv with optional
    FiLM modulation, wrapped in a skip connection."""

    def __init__(self, width: int, dilation: int, kernel_size: int = 7,
                 causal: bool = False, film_conditioning: bool = False,
                 film_embedding_size: int = 128, film_batch_norm: bool = False):
        super().__init__()
        self.net = nn.Sequential(
            Pad(kernel_size, dilation, causal=causal),
            nn.Conv1d(width, width, kernel_size, dilation=dilation, padding=0),
            nn.ELU(),
            nn.Conv1d(width, width, 1),
        )
        self.final_activation = nn.ELU()
        # None disables conditioning in forward().
        self.film = FiLM(film_embedding_size, width, film_batch_norm) if film_conditioning else None

    def forward(self, x: torch.Tensor, film_embedding: Optional[torch.Tensor] = None) -> torch.Tensor:
        branch = self.net(x)
        if self.film is not None:
            branch = self.film(branch, film_embedding)
        return x + self.final_activation(branch)
class _SoundStreamEncoderBlock(nn.Module):
    """SoundStream encoder block: three residual units at half width with
    dilations 1, 3, 9, followed by a strided down-sampling convolution."""

    def __init__(self, width: int, stride: int, kernel_size: int = 7,
                 causal: bool = False, film_conditioning: bool = False,
                 film_embedding_size: int = 128, film_batch_norm: bool = False):
        super().__init__()
        half_width = width // 2
        self.net = nn.ModuleList([
            _SoundStreamResidualUnit(
                half_width,
                dilation,
                kernel_size,
                causal=causal,
                film_conditioning=film_conditioning,
                film_embedding_size=film_embedding_size,
                film_batch_norm=film_batch_norm,
            )
            for dilation in (1, 3, 9)
        ])
        # Down-sample by `stride` while doubling the channel count.
        self.output = nn.Sequential(
            Pad(2 * stride, 1, causal=causal),
            nn.Conv1d(half_width, width, 2 * stride, stride=stride, padding=0),
            nn.ELU(),
        )

    def forward(self, x: torch.Tensor, film_embedding: Optional[torch.Tensor] = None) -> torch.Tensor:
        for unit in self.net:
            x = unit(x, film_embedding)
        return self.output(x)
class SoundStreamEncoder(nn.Module):
    """Convolutional waveform encoder from SoundStream model, without vector
    quantization.

    Args:
        input_channels (int): Number of input channels.
        hidden_channels (int): Number of hidden channels.
        output_channels (int): Number of output channels.
        kernel_size (int, optional): Kernel size. Defaults to 7.
        strides (tuple[int, ...], optional): Strides. Defaults to (2, 2, 4, 4).
        causal (bool, optional): Whether to use causal padding. Defaults to False.
        film_conditioning (bool, optional): Enable FiLM conditioning in all blocks.
        film_embedding_size (int, optional): FiLM embedding size. Defaults to 128.
        film_batch_norm (bool, optional): Use batch norm inside FiLM layers.
        transpose_output (bool, optional): If True, return (batch, time, channels)
            instead of (batch, channels, time). Defaults to False.
    """

    def __init__(self, input_channels: int, hidden_channels: int, output_channels: int,
                 kernel_size: int = 7, strides: tuple[int, ...] = (2, 2, 4, 4),
                 causal: bool = False, film_conditioning: bool = False,
                 film_embedding_size: int = 128, film_batch_norm: bool = False,
                 transpose_output: bool = False):
        super().__init__()
        self.input = nn.Sequential(
            Pad(kernel_size, 1, causal=causal),
            nn.Conv1d(input_channels, hidden_channels, kernel_size, padding=0),
        )
        blocks = []
        channels = hidden_channels
        for stride in strides:
            channels *= 2  # width doubles at every down-sampling stage
            blocks.append(
                _SoundStreamEncoderBlock(
                    channels,
                    stride,
                    kernel_size=kernel_size,
                    causal=causal,
                    film_conditioning=film_conditioning,
                    film_embedding_size=film_embedding_size,
                    film_batch_norm=film_batch_norm,
                )
            )
        self.encoder_blocks = nn.ModuleList(blocks)
        self.output = nn.Sequential(
            Pad(3, 1, causal=causal),
            nn.Conv1d(channels, output_channels, 3, padding=0),
        )
        self.transpose_output = transpose_output

    def forward(self, x: torch.Tensor, film_embedding: Optional[torch.Tensor] = None) -> torch.Tensor:
        x = self.input(x)
        for block in self.encoder_blocks:
            x = block(x, film_embedding)
        y = self.output(x)
        if self.transpose_output:
            y = y.transpose(1, 2)  # (b, c, t) -> (b, t, c)
        return y
class SoundStreamAttentionEncoder(nn.Module):
    """SoundStream convolutional encoder followed by attention pooling
    over the time axis.

    Args:
        input_channels (int): Number of input channels.
        hidden_channels (int): Number of hidden channels.
        output_channels (int): Number of output channels.
        **kwargs: Forwarded verbatim to :class:`SoundStreamEncoder`.
    """

    def __init__(self, input_channels: int, hidden_channels: int, output_channels: int, **kwargs):
        super().__init__()
        self.encoder = SoundStreamEncoder(input_channels, hidden_channels, output_channels, **kwargs)
        self.pooling = AttentionPooling(output_channels)

    def forward(self, x: torch.Tensor, film_embedding: Optional[torch.Tensor]=None) -> torch.Tensor:
        # Encode the waveform, then collapse time with learned attention.
        return self.pooling(self.encoder(x, film_embedding))
def _get_activation(activation: str): if (activation == 'gated'): return GatedActivation() return getattr(nn, activation)()
class _DilatedResidualBlock(nn.Module):
    """Temporal convolutional network internal block.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        kernel_size (int): Size of the convolution kernel.
        dilation (int): Dilation factor.
        causal (bool, optional): Whether to use causal padding. Defaults to True.
        norm (Literal["batch", "instance", None], optional): Normalization type.
        activation (str, optional): Activation function in `torch.nn` or "gated".
            Defaults to "GELU".
        film_conditioning (bool, optional): Whether to use FiLM conditioning.
            Defaults to False.
        film_embedding_size (int, optional): Size of the FiLM embedding.
            Defaults to None.
        film_batch_norm (bool, optional): Whether to use batch normalization in
            FiLM. Defaults to True.
        use_temporal_film (bool, optional): Whether to use TFiLM conditioning.
            Defaults to False.
        temporal_film_block_size (int, optional): TFiLM block size. Defaults to
            None.

    Raises:
        ValueError: If FiLM/TFiLM is enabled without a valid size, or ``norm``
            is not one of "batch"/"instance"/None.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, dilation: int, causal: bool=True, norm: Literal[('batch', 'instance', None)]=None, activation: str='GELU', film_conditioning: bool=False, film_embedding_size: Optional[int]=None, film_batch_norm: bool=True, use_temporal_film: bool=False, temporal_film_block_size: Optional[int]=None):
        super().__init__()
        # Validate conditioning configuration up front so misconfiguration
        # fails at construction time rather than in forward().
        if (film_conditioning and ((film_embedding_size is None) or (not isinstance(film_embedding_size, int)) or (film_embedding_size < 1))):
            raise ValueError('FiLM conditioning requires a valid embedding size (int >= 1).')
        if (use_temporal_film and ((temporal_film_block_size is None) or (not isinstance(temporal_film_block_size, int)) or (temporal_film_block_size < 1))):
            raise ValueError('TFiLM conditioning requires a valid block size (int >= 1).')
        net = []
        # The gated activation consumes two channel halves, so the conv must
        # produce twice as many channels in that case.
        pre_activation_channels = ((out_channels * 2) if (activation == 'gated') else out_channels)
        if (norm is not None):
            if (norm not in ('batch', 'instance')):
                raise ValueError('Invalid norm type (must be batch or instance)')
            _Norm = (nn.BatchNorm1d if (norm == 'batch') else nn.InstanceNorm1d)
            net.append(_Norm(in_channels))
        # Explicit Pad keeps length; the conv itself uses padding=0.
        net.extend([Pad(kernel_size, dilation, causal=causal), nn.Conv1d(in_channels, pre_activation_channels, kernel_size, dilation=dilation, padding=0)])
        self.net = nn.Sequential(*net)
        # FiLM modulates the pre-activation features with an external embedding.
        self.film = (FiLM(film_embedding_size, pre_activation_channels, film_batch_norm) if film_conditioning else None)
        self.activation = _get_activation(activation)
        self.tfilm = (TFiLM(out_channels, temporal_film_block_size) if use_temporal_film else None)
        # 1x1 conv matches channel counts for the residual connection.
        self.residual = nn.Conv1d(in_channels, out_channels, 1)

    def forward(self, x: torch.Tensor, film_embedding: Optional[torch.Tensor]=None):
        activations = self.net(x)
        if (self.film is not None):
            activations = self.film(activations, film_embedding)
        y = self.activation(activations)
        if (self.tfilm is not None):
            y = self.tfilm(y)
        return (y + self.residual(x))
class TCN(nn.Module):
    """Temporal convolutional network.

    Args:
        in_channels (int): Number of input channels.
        hidden_channels (int): Number of hidden channels.
        out_channels (int): Number of output channels.
        dilation_base (int, optional): Base of the dilation factor. Defaults to 2.
        dilation_blocks (int, optional): Number of layers after which the
            dilation pattern repeats from 1. Defaults to ``num_layers`` (no
            repetition).
        num_layers (int, optional): Number of layers. Defaults to 8.
        kernel_size (int, optional): Size of the convolution kernel. Defaults to 3.
        causal (bool, optional): Whether to use causal padding. Defaults to True.
        norm (Literal["batch", "instance", None], optional): Normalization type.
        activation (str, optional): Activation function in `torch.nn` or "gated".
            Defaults to "GELU".
        film_conditioning (bool, optional): Whether to use FiLM conditioning.
            Defaults to False.
        film_embedding_size (int, optional): FiLM embedding size. Defaults to None.
        film_batch_norm (bool, optional): Whether to use batch normalization in
            FiLM. Defaults to True.
        use_temporal_film (bool, optional): Whether to use TFiLM conditioning.
            Defaults to False.
        temporal_film_block_size (int, optional): TFiLM block size. Defaults to
            None.
    """

    def __init__(self, in_channels: int, hidden_channels: int, out_channels: int, dilation_base: int=2, dilation_blocks: Optional[int]=None, num_layers: int=8, kernel_size: int=3, causal: bool=True, norm: Literal[('batch', 'instance', None)]=None, activation: str='GELU', film_conditioning: bool=False, film_embedding_size: Optional[int]=None, film_batch_norm: bool=True, use_temporal_film: bool=False, temporal_film_block_size: Optional[int]=None):
        super().__init__()
        # 1x1 convs adapt external channel counts to the hidden width.
        self.in_projection = nn.Conv1d(in_channels, hidden_channels, 1)
        self.out_projection = nn.Conv1d(hidden_channels, out_channels, 1)
        net = []
        dilation_blocks = (dilation_blocks if (dilation_blocks is not None) else num_layers)
        for n in range(num_layers):
            # Dilations grow exponentially, restarting every dilation_blocks
            # layers: base^(n mod dilation_blocks).
            dilation = (dilation_base ** (n % dilation_blocks))
            net.append(_DilatedResidualBlock(hidden_channels, hidden_channels, kernel_size, dilation, causal=causal, norm=norm, activation=activation, film_conditioning=film_conditioning, film_embedding_size=film_embedding_size, film_batch_norm=film_batch_norm, use_temporal_film=use_temporal_film, temporal_film_block_size=temporal_film_block_size))
        # ModuleList so each block can receive the optional FiLM embedding.
        self.net = nn.ModuleList(net)

    def forward(self, x: torch.Tensor, film_embedding: Optional[torch.Tensor]=None):
        x = self.in_projection(x)
        for layer in self.net:
            x = layer(x, film_embedding)
        x = self.out_projection(x)
        return x
class ModalSynth(torch.nn.Module):
    """Modal synthesis with given frequencies, amplitudes, and optional phase.

    Uses linear interpolation to generate the frequency envelope.
    """

    def forward(self, params: torch.Tensor, num_samples: int):
        """Render audio from modal parameters.

        Args:
            params: Tensor of shape ``[nb, num_params, num_modes, num_frames]``
                where the parameter axis holds frequency, amplitude, and
                optionally phase.
            num_samples: Number of samples to generate.

        Returns:
            Audio tensor of shape ``[nb, 1, num_samples]``.
        """
        assert (params.ndim == 4), 'Expected 4D tensor'
        assert (params.size(1) in [2, 3]), 'Expected 2 or 3 parameters'
        # Split the parameter axis into per-parameter tensors of shape
        # (nb, num_modes, num_frames).
        parts = params.unbind(dim=1)
        phase = (parts[2] if (len(parts) == 3) else None)
        audio = modal_synth(parts[0], parts[1], num_samples, phase)
        # Add a channel dimension: (nb, n) -> (nb, 1, n).
        return audio.unsqueeze(1)
def modal_synth(freqs: torch.Tensor, amps: torch.Tensor, num_samples: int, phase: Optional[torch.Tensor]=None) -> torch.Tensor:
    """
    Synthesizes a modal signal from a set of frequencies, phases, and amplitudes.

    Args:
        freqs: A 3D tensor of frequencies in angular frequency of shape
            (batch_size, num_modes, num_frames)
        amps: A 3D tensor of amplitudes of shape (batch_size, num_modes, num_frames)
        num_samples: Number of samples in the output signal
        phase: Optional per-mode phase offsets of shape
            (batch_size, num_modes, num_frames); only the first frame is used.

    Returns:
        A 2D tensor of shape (batch_size, num_samples) with the summed modes.
    """
    (batch_size, num_modes, _) = freqs.shape
    assert (freqs.shape == amps.shape)
    # Upsample the frame-rate envelopes to audio rate.
    inst_freq = torch.nn.functional.interpolate(freqs, size=num_samples, mode='linear')
    envelope = torch.nn.functional.interpolate(amps, size=num_samples, mode='linear')
    # Flatten batch and mode axes so each mode is synthesized independently.
    envelope = envelope.reshape((batch_size * num_modes), num_samples)
    inst_freq = inst_freq.reshape((batch_size * num_modes), num_samples)
    # Integrate angular frequency to obtain the instantaneous phase.
    phase_env = torch.cumsum(inst_freq, dim=1)
    if (phase is not None):
        # Apply each mode's initial phase offset (first frame only).
        flat_phase = phase.reshape((batch_size * num_modes), (- 1))
        phase_env = (phase_env + flat_phase[(..., 0, None)])
    modes = (envelope * torch.sin(phase_env))
    # Un-flatten and sum the modes back per batch item.
    modes = modes.reshape(batch_size, num_modes, num_samples)
    return torch.sum(modes, dim=1)
class TransientTCN(torch.nn.Module):
    """TCN wrapper for transient modelling that can concatenate a learned
    conditioning signal onto the input as extra channels.

    Args:
        in_channels (int): Number of input channels of the inner TCN.
        hidden_channels (int): Hidden width of the inner TCN.
        out_channels (int): Number of output channels.
        dilation_base (int): Base of the dilation factor.
        dilation_blocks (int, optional): Dilation repeat period (see TCN).
        num_layers (int): Number of TCN layers.
        kernel_size (int): Convolution kernel size.
        film_conditioning (bool): Whether the inner TCN uses FiLM.
        film_embedding_size (int, optional): FiLM embedding size.
        film_batch_norm (bool): Whether FiLM uses batch normalization.
        transient_conditioning (bool): If True, learn a fixed conditioning
            signal that is appended to the input channels.
        transient_conditioning_channels (int): Channels of the learned signal.
        transient_conditioning_length (int): Length (samples) of the learned
            signal; shorter inputs are not supported, longer are zero-padded.
    """

    def __init__(self, in_channels: int=1, hidden_channels: int=32, out_channels: int=1, dilation_base: int=2, dilation_blocks: Optional[int]=None, num_layers: int=8, kernel_size: int=13, film_conditioning: bool=False, film_embedding_size: Optional[int]=None, film_batch_norm: bool=True, transient_conditioning: bool=False, transient_conditioning_channels: int=32, transient_conditioning_length: int=24000):
        super().__init__()
        self.tcn = TCN(in_channels=in_channels, hidden_channels=hidden_channels, out_channels=out_channels, dilation_base=dilation_base, dilation_blocks=dilation_blocks, num_layers=num_layers, kernel_size=kernel_size, film_conditioning=film_conditioning, film_embedding_size=film_embedding_size, film_batch_norm=film_batch_norm)
        if transient_conditioning:
            # Learned conditioning signal, scaled down by channel count at init.
            # Only created when enabled; forward() checks with hasattr.
            p = (torch.randn(1, transient_conditioning_channels, transient_conditioning_length) / transient_conditioning_channels)
            self.transient_conditioning = torch.nn.Parameter(p, requires_grad=True)

    def forward(self, x: torch.Tensor, embedding: Optional[torch.Tensor]=None):
        if hasattr(self, 'transient_conditioning'):
            # NOTE(review): x[0].size(0) is the channel count of the first
            # batch element, not the batch size; the cat below only works when
            # that value equals the batch dimension. Looks like this may have
            # been written for a different input layout — confirm upstream.
            cond = repeat(self.transient_conditioning, '1 c l -> b c l', b=x[0].size(0))
            # Zero-pad the conditioning signal to the input length.
            cond = torch.nn.functional.pad(cond, (0, (x[0].size((- 1)) - cond.size((- 1)))))
            x = torch.cat([x, cond], dim=1)
        x = self.tcn(x, embedding)
        return x
class DrumBlender(pl.LightningModule):
    """
    LightningModule for kick synthesis from a modal frequency input.

    # TODO: A lot of these are currently optional to help with testing and
    # development, but they should be required in the future

    Args:
        modal_synth (nn.Module): Synthesis model takes modal parameters and
            generates audio
        loss_fn (Union[Callable, nn.Module]): Loss function to use for training
        noise_synth (Optional[nn.Module]): Receives noise parameters and
            generates noise audio signal
        transient_synth (Optional[nn.Module]): Receives audio plus transient
            parameters and generates transient audio signal
        modal_autoencoder (Optional[nn.Module]): Receives main embedding and
            generates modal parameters
        noise_autoencoder (Optional[nn.Module]): Receives main embedding and
            generates noise parameters
        transient_autoencoder (Optional[nn.Module]): Receives main embedding
            and generates transient parameters
        encoder (Optional[nn.Module]): Receives audio and generates main
            embedding
        transient_parallel (bool): If True, add the transient output to the
            signal instead of replacing it.
        transient_takes_noise (bool): If True, noise is mixed in before the
            transient model instead of after.
        modal_autoencoder_accepts_audio (bool): Feed raw audio (not the
            embedding) to the modal autoencoder.
        noise_autoencoder_accepts_audio (bool): Feed raw audio to the noise
            autoencoder.
        transient_autoencoder_accepts_audio (bool): Feed raw audio to the
            transient autoencoder.
        test_metrics (Optional[torch.nn.ModuleDict]): Extra metrics logged
            during test_step.
        float32_matmul_precision (Literal["medium", "high", "highest", None]):
            Sets the precision of float32 matmul operations.
    """

    def __init__(self, modal_synth: nn.Module, loss_fn: Union[(Callable, nn.Module)], noise_synth: Optional[nn.Module]=None, transient_synth: Optional[nn.Module]=None, modal_autoencoder: Optional[nn.Module]=None, noise_autoencoder: Optional[nn.Module]=None, transient_autoencoder: Optional[nn.Module]=None, encoder: Optional[nn.Module]=None, transient_parallel: bool=False, transient_takes_noise: bool=False, modal_autoencoder_accepts_audio: bool=False, noise_autoencoder_accepts_audio: bool=False, transient_autoencoder_accepts_audio: bool=False, test_metrics: Optional[torch.nn.ModuleDict]=None, float32_matmul_precision: Literal[('medium', 'high', 'highest', None)]=None):
        super().__init__()
        self.modal_synth = modal_synth
        self.loss_fn = loss_fn
        self.noise_synth = noise_synth
        self.transient_synth = transient_synth
        self.modal_autoencoder = modal_autoencoder
        self.noise_autoencoder = noise_autoencoder
        self.transient_autoencoder = transient_autoencoder
        self.encoder = encoder
        self.transient_parallel = transient_parallel
        self.modal_autoencoder_accepts_audio = modal_autoencoder_accepts_audio
        self.noise_autoencoder_accepts_audio = noise_autoencoder_accepts_audio
        self.transient_autoencoder_accepts_audio = transient_autoencoder_accepts_audio
        self.transient_takes_noise = transient_takes_noise
        if (float32_matmul_precision is not None):
            torch.set_float32_matmul_precision(float32_matmul_precision)
        # Only set the attribute when metrics are provided; test_step checks
        # with hasattr.
        if (test_metrics is not None):
            self.metrics = test_metrics

    def forward(self, original: torch.Tensor, params: torch.Tensor):
        # 1) Encode audio into the shared embedding (if an encoder is set).
        embedding = None
        if (self.encoder is not None):
            embedding = self.encoder(original)
        # 2) Derive modal/noise/transient parameters, either from the raw
        #    audio or from the shared embedding. The embedding-based
        #    autoencoders return (params, latent); the latent is discarded.
        modal_params = params
        if (self.modal_autoencoder is not None):
            if self.modal_autoencoder_accepts_audio:
                modal_params = self.modal_autoencoder(original, params)
            else:
                (modal_params, _) = self.modal_autoencoder(embedding, params)
        noise_params = None
        if (self.noise_autoencoder is not None):
            if self.noise_autoencoder_accepts_audio:
                noise_params = self.noise_autoencoder(original)
            else:
                (noise_params, _) = self.noise_autoencoder(embedding)
        transient_params = None
        if (self.transient_autoencoder is not None):
            if self.transient_autoencoder_accepts_audio:
                transient_params = self.transient_autoencoder(original)
            else:
                (transient_params, _) = self.transient_autoencoder(embedding)
        # 3) Synthesize the modal (tonal) component at the input length.
        y_hat = self.modal_synth(modal_params, original.shape[(- 1)])
        # 4) Optionally synthesize noise; mix it in *before* the transient
        #    model when transient_takes_noise is set, otherwise after.
        if (self.noise_synth is not None):
            assert (noise_params is not None), 'Noise params must be provided'
            noise = self.noise_synth(noise_params, original.shape[(- 1)])
            # Add a channel dimension: (b, n) -> (b, 1, n).
            noise = rearrange(noise, 'b n -> b () n')
            if self.transient_takes_noise:
                y_hat = (y_hat + noise)
        # 5) Transient model either replaces the signal or (when
        #    transient_parallel) is summed with it.
        if (self.transient_synth is not None):
            transient = self.transient_synth(y_hat, transient_params)
            if self.transient_parallel:
                y_hat = (y_hat + transient)
            else:
                y_hat = transient
        if (self.noise_synth is not None):
            if (self.transient_takes_noise is False):
                y_hat = (y_hat + noise)
        return y_hat

    def _do_step(self, batch: Tuple[(torch.Tensor, ...)]):
        # Shared train/val/test step: reconstruct the audio and score it
        # against the original with the configured loss.
        if (len(batch) == 2):
            (original, params) = batch
        else:
            raise ValueError('Expected batch to be a tuple of length 2')
        y_hat = self(original, params)
        loss = self.loss_fn(y_hat, original)
        return (loss, y_hat)

    def training_step(self, batch: Tuple[(torch.Tensor, torch.Tensor)], batch_idx: int):
        (loss, _) = self._do_step(batch)
        self.log('train/loss', loss, on_epoch=True)
        return loss

    def validation_step(self, batch: Tuple[(torch.Tensor, torch.Tensor)], batch_idx: int):
        (loss, _) = self._do_step(batch)
        self.log('validation/loss', loss)
        return loss

    def test_step(self, batch: Tuple[(torch.Tensor, torch.Tensor)], batch_idx: int):
        (loss, y_hat) = self._do_step(batch)
        self.log('test/loss', loss)
        # Log any additional configured test metrics against the original.
        if hasattr(self, 'metrics'):
            for (name, metric) in self.metrics.items():
                self.log(f'test/{name}', metric(y_hat, batch[0]))
        return loss
def download_full_dataset(url: str, bucket: str, metafile: str, output_dir: Union[(str, Path)]) -> None:
    """
    Download the kick dataset from Cloudflare.

    Credentials are read from the CLOUDFLARE_ACCESS_KEY_ID and
    CLOUDFLARE_ACCESS_SECRET_KEY environment variables.

    Args:
        url: The URL of the Cloudflare endpoint.
        bucket: The name of the bucket to download from.
        metafile: The name of the metadata file.
        output_dir: The directory to download the dataset to.
    """
    s3 = boto3.client('s3', endpoint_url=url, aws_access_key_id=os.environ.get('CLOUDFLARE_ACCESS_KEY_ID'), aws_secret_access_key=os.environ.get('CLOUDFLARE_ACCESS_SECRET_KEY'), region_name='auto')
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    # Fetch the metadata file first; it lists the folders to mirror.
    json_output = output_dir.joinpath(metafile)
    s3.download_file(bucket, metafile, str(json_output))
    with open(json_output, 'r') as f:
        metadata = json.load(f)
    log.info('Getting list of files to download...')
    file_list = get_file_list_r2(metadata, bucket, s3)
    log.info(f'Found {len(file_list)} files to download.')
    download_filelist_r2(file_list, output_dir, bucket, s3)
def get_file_list_r2(metadata: Dict, bucket: str, s3) -> List:
    """
    List all objects in subfolders of the R2 bucket.

    Args:
        metadata (Dict): The dataset metadata. Contains a list of items and
            their subfolders, which contain the files to download.
        bucket (str): The name of the bucket.
        s3 (boto3.client): The boto3 client for the R2 bucket.

    Returns:
        List: A list of all the files to download.
    """
    keys = []
    # Walk every item's folder list and collect the object keys under each.
    for item_name in tqdm(metadata):
        for folder in metadata[item_name]['folders']:
            keys.extend(get_subfolder_filelist_r2(bucket, folder, s3))
    return keys
def get_subfolder_filelist_r2(bucket: str, subfolder: str, s3) -> List:
    """
    List all objects in a subfolder of the R2 bucket. Makes sure to
    handle continuation tokens.

    Args:
        bucket (str): The name of the bucket.
        subfolder (str): The subfolder (key prefix) to list files from.
        s3 (boto3.client): The boto3 client for the R2 bucket.

    Returns:
        List: A list of all the files to download.
    """
    kwargs = {'Bucket': bucket, 'Prefix': subfolder}
    file_list = []
    while True:
        objs = s3.list_objects_v2(**kwargs)
        # An empty prefix returns a response without a 'Contents' key;
        # objs['Contents'] would raise KeyError in that case.
        for obj in objs.get('Contents', []):
            file_list.append(obj['Key'])
        if objs.get('IsTruncated'):
            # More pages remain; resume from the continuation token.
            kwargs['ContinuationToken'] = objs['NextContinuationToken']
        else:
            break
    return file_list
def download_filelist_r2(file_list: List, output_dir: Path, bucket: str, s3):
    """
    Download a list of files from the R2 bucket.

    Args:
        file_list (List): A list of object keys to download.
        output_dir (Path): Local directory the keys are mirrored into.
        bucket (str): The name of the bucket.
        s3 (boto3.client): The boto3 client for the R2 bucket.
    """
    for key in tqdm(file_list):
        target = output_dir.joinpath(key)
        # Mirror the bucket's folder structure locally before downloading.
        Path(target.parent).mkdir(parents=True, exist_ok=True)
        s3.download_file(bucket, key, str(target))
def download_file_r2(filename: str, url: str, bucket: str, output: Optional[str]=None):
    """
    Download a file from an R2 bucket, printing transfer progress.

    Args:
        filename: The name of the file to download.
        url: The URL of the Cloudflare endpoint.
        bucket: The name of the bucket.
        output (optional): The name of the output file. Defaults to filename in bucket.
    """
    client = boto3.client('s3', endpoint_url=url, aws_access_key_id=os.environ.get('CLOUDFLARE_ACCESS_KEY_ID'), aws_secret_access_key=os.environ.get('CLOUDFLARE_ACCESS_SECRET_KEY'), region_name='auto')
    output = filename if output is None else output
    # The object size is needed up front so the progress callback can report
    # percentages during the download.
    head = client.head_object(Bucket=bucket, Key=filename)
    progress = R2ProgressPercentage(filename, upload=False, size=head['ContentLength'])
    client.download_file(bucket, filename, str(output), Callback=progress)
    # Terminate the carriage-return progress line.
    sys.stdout.write('\n')
def upload_file_r2(filename: str, url: str, bucket: str):
    """
    Upload a file to the R2 bucket, printing transfer progress.

    Args:
        filename (str): The name of the file to upload.
        url (str): The URL of the Cloudflare endpoint.
        bucket (str): The name of the bucket.
    """
    client = boto3.client('s3', endpoint_url=url, aws_access_key_id=os.environ.get('CLOUDFLARE_ACCESS_KEY_ID'), aws_secret_access_key=os.environ.get('CLOUDFLARE_ACCESS_SECRET_KEY'), region_name='auto')
    # The key in the bucket mirrors the local filename.
    client.upload_file(filename, bucket, filename, Callback=R2ProgressPercentage(filename))
class R2ProgressPercentage:
    """
    A class to track the progress of a file transfer to/from the R2 bucket.
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html # noqa: E501

    Args:
        filename: The name of the file being transferred.
        upload: Whether the file is being uploaded or downloaded,
            defaults to True (Upload).
        size (optional): The size of the file being transferred, defaults to
            None. Required for downloads.
    """

    def __init__(self, filename: str, upload: bool=True, size: Optional[float]=None):
        self._filename = filename
        if upload:
            # For uploads the size can be read from the local file.
            self._size = float(os.path.getsize(filename))
        else:
            assert (size is not None), 'Must provide size for download.'
            self._size = size
        self._seen_so_far = 0
        # boto3 may invoke the callback from multiple transfer threads.
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        with self._lock:
            self._seen_so_far += bytes_amount
            # Guard against zero-length files, which would otherwise raise
            # ZeroDivisionError; report them as 100% complete.
            if self._size:
                percentage = ((self._seen_so_far / self._size) * 100)
            else:
                percentage = 100.0
            sys.stdout.write(('\r%s %s / %s (%.2f%%)' % (self._filename, self._seen_so_far, self._size, percentage)))
            sys.stdout.flush()
def get_files_from_folders(basedir: str, folders: Union[(Dict, List[str])], pattern: str) -> Tuple[(List, List)]:
    """
    Recursively list files matching a glob pattern inside a set of folders.

    Args:
        basedir (str): Base directory the folder paths are relative to.
        folders (Union[Dict, List[str]]): Either a list of folder paths, or a
            dict mapping group names to dicts containing a "path" key plus
            arbitrary per-group metadata.
        pattern (str): The pattern to search for. E.g. "*.wav"

    Returns:
        Tuple[List, List]: The matched file paths and, when ``folders`` is a
        dict, one metadata dict per matched file (empty list otherwise).
    """
    file_list = []
    file_metadata = []
    if isinstance(folders, dict):
        for val in folders.values():
            # Everything except the folder path is treated as metadata and
            # replicated once per file found in that folder.
            folder_metadata = {k: v for (k, v) in val.items() if (k != 'path')}
            (folder_files, _) = get_files_from_folders(basedir, [val['path']], pattern)
            file_metadata.extend(([folder_metadata] * len(folder_files)))
            file_list.extend(folder_files)
    elif isinstance(folders, list):
        for folder in folders:
            file_list.extend(Path(basedir).joinpath(folder).rglob(pattern))
    return (file_list, file_metadata)
def create_tarfile(output_file: str, source_dir: str):
    """
    Create a gzipped tarfile from a directory.

    Args:
        output_file: The name of the tarfile to create.
        source_dir: The directory to create the tarfile from.
    """
    entries = list(Path(source_dir).rglob('*'))
    with tarfile.open(output_file, 'w:gz') as tar:
        for entry in tqdm(entries):
            # Skip directories; they are implied by their files' arcnames.
            if not entry.is_file():
                continue
            # Store paths relative to source_dir so the archive has no
            # machine-specific prefix.
            tar.add(entry, arcname=entry.relative_to(source_dir))
def str2int(s: str) -> int:
    """
    Convert string to int using hex hashing.
    https://stackoverflow.com/a/16008760/82733
    """
    # SHA-1 gives a stable digest across runs (unlike built-in hash()),
    # reduced modulo 2**32 - 1 to fit a 32-bit-ish range.
    digest = hashlib.sha1(s.encode('utf-8')).hexdigest()
    return (int(digest, 16) % ((2 ** 32) - 1))
def load_model(config: str, ckpt: str, include_data: bool=False):
    """
    Load model from checkpoint.

    Args:
        config: Path to the training YAML config used for the run.
        ckpt: Path to the Lightning checkpoint file.
        include_data: Whether to also instantiate the AudioDataModule from the
            config's ``data`` section.

    Returns:
        Tuple of (model, datamodule); datamodule is None unless include_data.
    """
    config_parser = ArgumentParser()
    config_parser.add_subclass_arguments(DrumBlender, 'model', fail_untyped=False)
    # Accept (and mostly ignore) the remaining top-level keys present in a
    # full training config so parse_path does not fail on them.
    config_parser.add_argument('--trainer', type=dict, default={})
    config_parser.add_argument('--seed_everything', type=int)
    config_parser.add_argument('--ckpt_path', type=str)
    config_parser.add_argument('--optimizer', type=dict)
    config_parser.add_argument('--lr_scheduler', type=dict)
    if include_data:
        config_parser.add_subclass_arguments(AudioDataModule, 'data')
    else:
        config_parser.add_argument('--data', type=dict, default={})
    config = config_parser.parse_path(config)
    init = config_parser.instantiate_classes(config)
    # Collect the constructor arguments from the instantiated model so
    # load_from_checkpoint can rebuild it with identical submodules.
    init_args = inspect.getfullargspec(DrumBlender.__init__).args
    model_dict = {attr: getattr(init.model, attr) for attr in init_args if ((attr != 'self') and hasattr(init.model, attr))}
    print(f'Loading checkpoint from {ckpt}...')
    model = init.model.load_from_checkpoint(ckpt, **model_dict)
    if include_data:
        datamodule = init.data
        return (model, datamodule)
    return (model, None)
def load_datamodule(config: str):
    """
    Load a datamodule from a config file.

    Args:
        config: Path to a YAML config describing an AudioDataModule subclass.

    Returns:
        The instantiated datamodule.
    """
    datamodule_parser = ArgumentParser()
    datamodule_parser.add_subclass_arguments(AudioDataModule, 'datamodule')
    if (config is not None):
        with open(config, 'r') as f:
            config = yaml.safe_load(f)
        # Nest under the 'datamodule' key the parser expects.
        config = {'datamodule': config}
    datamodule_args = datamodule_parser.parse_object(config)
    datamodule = datamodule_parser.instantiate_classes(datamodule_args).datamodule
    return datamodule
def load_config_yaml(config: str):
    """
    Load a config file.

    Args:
        config: Path to a YAML file.

    Returns:
        The parsed YAML contents.
    """
    with open(config, 'r') as fp:
        return yaml.safe_load(fp)
def main(arguments):
    """Aggregate per-model metrics.csv files into a LaTeX results table.

    Args:
        arguments: CLI argument list. ``indir`` is the root log directory
            searched recursively for metrics.csv files; ``type`` selects the
            table layout ('all' = per-source columns, 'instrument' =
            per-instrument columns); ``-o/--outfile`` is the LaTeX output
            destination (defaults to stdout).
    """
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('indir', help='Input dir -- root log dir for metric csvs', type=str)
    parser.add_argument('type', help="Table type: ['all', 'instrument']", type=str)
    parser.add_argument('-o', '--outfile', help='Output file', default=sys.stdout, type=argparse.FileType('w'))
    args = parser.parse_args(arguments)
    indir = Path(args.indir)
    csv_files = [f for f in indir.rglob('metrics.csv') if f.is_file()]
    rows = []
    for f in csv_files:
        sub_df = pd.read_csv(f)
        version = f.parent.name
        # Exactly one run per experiment is expected.
        assert (version == 'version_0')
        model_name = f.parent.parent.name
        # Experiment dirs are named "<model>_<split>"; `models` is a
        # module-level list of known model names.
        for model in models:
            if model_name.startswith(model):
                (_, split) = model_name.split(model)
                split = split[1:]
                sub_df['instrument'] = 'all'
                if (split == 'a'):
                    sub_df['source'] = 'acoustic'
                elif (split == 'e'):
                    sub_df['source'] = 'electronic'
                elif (split == 'all'):
                    sub_df['source'] = 'all'
                else:
                    # Any other suffix names an instrument-specific split.
                    sub_df['instrument'] = split
                    sub_df['source'] = 'all'
                sub_df['model'] = model
        rows.append(sub_df)
    df = pd.concat(rows, ignore_index=True)
    df = df.sort_values(by=['model', 'source', 'instrument'])
    # Keep only the rows relevant for the requested table layout.
    if (args.type == 'all'):
        df = df[(df['instrument'] == 'all')]
    elif (args.type == 'instrument'):
        df = df[(df['source'] == 'all')]
        df = df[(df['instrument'] != 'all')]
    else:
        raise ValueError(f'Unknown type {args.type}')
    print(df)
    # Build one output row per model, averaging each metric per column group.
    rows = []
    for model in models:
        model_df = df[(df['model'] == model)]
        model_row = {'model': [model]}
        if (args.type == 'all'):
            for source in ['all', 'acoustic', 'electronic']:
                source_df = model_df[(model_df['source'] == source)]
                model_row[(source + '_mss')] = [source_df['test/loss'].mean()]
                model_row[(source + '_lsd')] = [source_df['test/lsd'].mean()]
                model_row[(source + '_flux_onset')] = [source_df['test/flux_onset'].mean()]
        else:
            for inst in ['kick', 'snare', 'tom', 'cymbals']:
                inst_df = model_df[(model_df['instrument'] == inst)]
                model_row[(inst + '_mss')] = [inst_df['test/loss'].mean()]
                model_row[(inst + '_lsd')] = [inst_df['test/lsd'].mean()]
                model_row[(inst + '_flux_onset')] = [inst_df['test/flux_onset'].mean()]
        rows.append(pd.DataFrame.from_dict(model_row))
    df = pd.concat(rows, ignore_index=True)

    def float_format(x):
        # Pick precision by magnitude so columns stay compact.
        if ((x < 10) and (x > 0.1)):
            return ('%.2f' % x)
        elif (x < 0.1):
            return ('%.3f' % x)
        else:
            return ('%.1f' % x)
    df.to_latex(args.outfile, index=False, float_format=float_format)
def pytest_sessionstart(session):
    # Disable wandb logging/network access for the entire pytest session.
    wandb.init(mode='disabled')
def import_class(class_path: str):
    """
    Import and return an attribute (typically a class) from a dotted path.

    Args:
        class_path: Fully-qualified name, e.g. "package.module.ClassName".

    Returns:
        The attribute named by the path.
    """
    import importlib

    (module_path, class_name) = class_path.rsplit('.', 1)
    # importlib.import_module is the documented API and returns the leaf
    # submodule directly, unlike __import__ with a fromlist workaround.
    module = importlib.import_module(module_path)
    return getattr(module, class_name)
def pytest_generate_tests(metafunc):
    """Parametrize config-file fixtures with every file matching their glob."""
    for (fixture_name, glob_params) in TEST_TYPES.items():
        if (fixture_name not in metafunc.fixturenames):
            continue
        # Each file found on disk becomes one parametrized test case.
        metafunc.parametrize(fixture_name, glob.glob(**glob_params))
@pytest.fixture
def parser():
    """Provide a fresh LightningArgumentParser for each test."""
    return LightningArgumentParser()
def read_cfg(cfg: os.PathLike, wrap: Optional[str]='cfg'):
    """
    Read a config file into a string, optionally nesting its contents under a
    top-level key.

    Args:
        cfg: Path of the config file to read.
        wrap: Key to nest the file contents under; pass None to return the
            raw file contents unchanged.
    """
    with open(cfg, 'r') as fp:
        contents = fp.read()
    if wrap is None:
        return contents
    # Indent every line by one space so the YAML nests under the wrap key.
    indented = contents.replace('\n', '\n ')
    return f'{wrap}:\n {indented}'
def test_can_instantiate_from_data_config(data_cfg, parser):
    """Every data config must declare and instantiate a LightningDataModule."""
    wrapped = read_cfg(data_cfg)
    parser.add_lightning_class_args(LightningDataModule, 'cfg', subclass_mode=True, required=True)
    args = parser.parse_string(wrapped)
    assert ('class_path' in args.cfg), 'No class_path key in config root level'
    # Record the declared class before instantiation, then verify the
    # instantiated object matches it.
    declared = args.cfg['class_path']
    objs = parser.instantiate_classes(args)
    assert isinstance(objs.cfg, import_class(declared))
def test_can_instantiate_from_loss_config(loss_cfg, parser):
    """Every loss config must instantiate a callable or an nn.Module."""
    wrapped = read_cfg(loss_cfg)
    parser.add_argument('cfg', type=Union[(Callable, torch.nn.Module)])
    args = parser.parse_string(wrapped)
    assert ('class_path' in args.cfg), 'No class_path key in config root level'
    declared = args.cfg['class_path']
    objs = parser.instantiate_classes(args)
    assert isinstance(objs.cfg, import_class(declared))
    # A loss must be usable either as a module (forward) or a plain callable.
    if isinstance(objs.cfg, torch.nn.Module):
        assert hasattr(objs.cfg, 'forward'), 'Loss function must have a forward method.'
    else:
        assert isinstance(objs.cfg, Callable), 'Loss function must be callable.'
def test_can_instantiate_from_model_config(model_cfg, parser):
    """Every model config must instantiate its declared nn.Module subclass."""
    wrapped = read_cfg(model_cfg)
    parser.add_argument('cfg', type=torch.nn.Module)
    args = parser.parse_string(wrapped)
    assert ('class_path' in args.cfg), 'No class_path key in config root level'
    declared = args.cfg['class_path']
    objs = parser.instantiate_classes(args)
    assert isinstance(objs.cfg, import_class(declared))
def test_can_instantiate_from_experiment_config(experiment_cfg, monkeypatch):
    """A full experiment config must build a LightningModule via the CLI."""
    with monkeypatch.context() as m:
        import sys
        # Simulate the CLI invocation; force a CPU single-device trainer so
        # the test does not depend on available accelerators.
        fake_argv = ['fake_file.py', '-c', str(experiment_cfg), '--trainer.accelerator', 'cpu', '--trainer.devices', '1']
        m.setattr(sys, 'argv', fake_argv)
        cli = LightningCLI(run=False)
        assert isinstance(cli.model, LightningModule)
def test_kick_dataset_init_no_data(fs):
    """Constructing a dataset over a missing directory must fail."""
    with pytest.raises(FileNotFoundError):
        AudioDataset('nonexistent_dir', 'nonexistent_file.json', TEST_SAMPLE_RATE, TEST_NUM_SAMPLES)
def processed_metadata(filename: str):
    """
    Fake ``json.load`` side effect returning processed dataset metadata.

    Args:
        filename: The open file object passed to ``json.load``; its ``name``
            attribute must point at the expected metadata file.

    Raises:
        FileNotFoundError: If a different file was opened.

    Returns:
        100 fake metadata entries keyed by index.
    """
    expected_filename = Path(TEST_DATA_DIR).joinpath(TEST_META_FILE)
    # Normalize to Path so the check works whether the file was opened with a
    # str or a pathlib.Path (file.name preserves the opener's argument type;
    # a str would never compare equal to a Path).
    if (Path(filename.name) != expected_filename):
        raise FileNotFoundError
    metadata = {}
    for i in range(100):
        metadata[i] = {'filename': f'kick_{i}.wav', 'sample_pack_key': 'pack_a', 'type': 'electro'}
    return metadata
def audio_dataset(fs, mocker, **kwargs):
    """Build an AudioDataset over a fake filesystem with mocked metadata.

    Extra kwargs are forwarded to the AudioDataset constructor.
    """
    fs.create_dir(TEST_DATA_DIR)
    fs.create_file(Path(TEST_DATA_DIR).joinpath(TEST_META_FILE))
    # json.load returns the canned 100-entry metadata regardless of contents.
    mocker.patch('json.load', side_effect=processed_metadata)
    return AudioDataset(TEST_DATA_DIR, TEST_META_FILE, TEST_SAMPLE_RATE, TEST_NUM_SAMPLES, **kwargs)
def test_audio_dataset_init_no_split(fs, mocker):
    """Without a split, the dataset contains all 100 files."""
    dataset = audio_dataset(fs, mocker)
    assert (len(dataset.file_list) == 100)
def test_audio_dataset_init_train(fs, mocker):
    """The train split holds 80% of the 100 files."""
    dataset = audio_dataset(fs, mocker, split='train')
    assert (len(dataset.file_list) == 80)
def test_audio_dataset_init_test(fs, mocker):
    """The test split holds 10% of the 100 files."""
    dataset = audio_dataset(fs, mocker, split='test')
    assert (len(dataset.file_list) == 10)
def test_audio_dataset_init_val(fs, mocker):
    """The val split holds 10% of the 100 files."""
    dataset = audio_dataset(fs, mocker, split='val')
    assert (len(dataset.file_list) == 10)
def test_audio_dataset_init_invalid_split(fs, mocker):
    """Unknown split names must be rejected."""
    with pytest.raises(ValueError):
        audio_dataset(fs, mocker, split='invalid')
def test_audio_dataset_init_reproducible(fs, mocker):
    """Two datasets built over the same metadata must list files identically."""
    dataset_a = audio_dataset(fs, mocker)
    # Second instance reuses the fake fs/mocks set up by the first call.
    dataset_b = AudioDataset(TEST_DATA_DIR, TEST_META_FILE, TEST_SAMPLE_RATE, TEST_NUM_SAMPLES)
    assert (dataset_a.file_list == dataset_b.file_list)
def test_audio_dataset_len(fs, mocker):
    """__len__ must report the number of metadata entries (100 in the fakes)."""
    dataset = audio_dataset(fs, mocker)
    # Previously a bare comparison expression with no assert, so the test
    # could never fail; assert makes the check effective.
    assert (len(dataset) == 100)
def test_audio_dataset_getitem(fs, mocker):
    """__getitem__ must return the loaded audio unchanged as a 1-tuple."""
    dataset = audio_dataset(fs, mocker)
    test_audio = torch.rand(1, TEST_NUM_SAMPLES)
    # Stub torchaudio.load so no real file decoding happens.
    mocker = mocker.patch(f'{TESTED_MODULE}.torchaudio.load', return_value=(test_audio, TEST_SAMPLE_RATE))
    (audio,) = dataset[0]
    assert (audio.shape == (1, TEST_NUM_SAMPLES))
    assert torch.all((audio == test_audio))
@pytest.fixture
def sample_pack_split_metadata():
    """100 metadata entries across packs: 'a' (0-79), 'b' (80-89), 'c' (90-99)."""

    def pack_for(index):
        # Pack boundaries chosen so pack sizes are 80/10/10.
        if (index >= 90):
            return 'c'
        if (index >= 80):
            return 'b'
        return 'a'

    return {i: {'filename': i, 'sample_pack_key': pack_for(i), 'type': 'electro'} for i in range(100)}
def test_audio_dataset_sample_pack_split(fs, mocker, sample_pack_split_metadata):
    """Sample-pack splitting must keep whole packs together per split."""
    dataset = audio_dataset(fs, mocker, split_strategy='sample_pack')
    dataset.metadata = sample_pack_split_metadata
    # Train gets the big pack 'a' (indices 0-79).
    dataset._sample_pack_split(split='train', test_size=0.1, val_size=0.1)
    assert (len(dataset.file_list) == 80)
    assert (max(dataset.file_list) == 79)
    # Test gets one of the small packs ('b' = 80-89 or 'c' = 90-99),
    # whichever one the split assigned.
    dataset._sample_pack_split(split='test', test_size=0.1, val_size=0.1)
    assert (len(dataset.file_list) == 10)
    if (min(dataset.file_list) == 80):
        assert (max(dataset.file_list) == 89)
    else:
        assert ((min(dataset.file_list) == 90) and (max(dataset.file_list) == 99))
    # Val gets the remaining small pack; same either-or check applies.
    dataset._sample_pack_split(split='val', test_size=0.1, val_size=0.1)
    assert (len(dataset.file_list) == 10)
    if (min(dataset.file_list) == 80):
        assert (max(dataset.file_list) == 89)
    else:
        assert ((min(dataset.file_list) == 90) and (max(dataset.file_list) == 99))
def test_audio_dataset_sample_pack_split_reproducible(fs, mocker, sample_pack_split_metadata):
    """Re-running the same sample-pack split must yield the same files."""
    dataset = audio_dataset(fs, mocker, split_strategy='sample_pack')
    dataset.metadata = sample_pack_split_metadata
    dataset._sample_pack_split(split='test', test_size=0.1, val_size=0.1)
    file_list_a = list(dataset.file_list)
    # Second invocation with identical arguments must be deterministic.
    dataset._sample_pack_split(split='test', test_size=0.1, val_size=0.1)
    file_list_b = list(dataset.file_list)
    assert (file_list_a == file_list_b)
@pytest.fixture
def fakefs(fs, mocker):
    """
    Fake FS for testing with a mocked tqdm, which behaves
    poorly with fakefs.
    """
    # Replace tqdm with an identity function so iteration still works.
    mocker.patch(f'{TESTED_MODULE}.tqdm', side_effect=(lambda x: x))
    return fs
def test_audio_datamodule_init():
    """The datamodule must be constructible with all-default arguments."""
    AudioDataModule()
def test_audio_datamodule_prepare_download_archive(fs, mocker):
    """With nothing on disk, prepare_data downloads and extracts the archive."""
    mocked_download = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    mocked_extract = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    data = AudioDataModule()
    data.prepare_data()
    assert (mocked_download.call_args_list == [mock.call(data.archive, data.url, data.bucket)])
    assert (mocked_extract.call_args_list == [mock.call(data.archive, data.data_dir)])
def test_audio_datamodule_prepare_datadir_exists(fs, mocker):
    """If the data dir already exists, neither download nor extract runs."""
    mocked_download = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    mocked_extract = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    data = AudioDataModule()
    fs.create_dir(data.data_dir)
    data.prepare_data()
    assert (mocked_download.call_args_list == [])
    assert (mocked_extract.call_args_list == [])
def test_audio_datamodule_prepare_archive_exists(fs, mocker):
    """If the archive is already present, only extraction runs (no download)."""
    mocked_download = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    mocked_extract = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    data = AudioDataModule()
    fs.create_file(data.archive)
    data.prepare_data()
    assert (mocked_download.call_args_list == [])
    assert (mocked_extract.call_args_list == [mock.call(data.archive, data.data_dir)])
def test_audio_datamodule_prepare_unprocessed_raise(fs, mocker):
    """Requesting raw preprocessing over an existing processed dir must fail."""
    datamodule = AudioDataModule()
    fs.create_dir(datamodule.data_dir)
    with pytest.raises(RuntimeError):
        datamodule.prepare_data(use_preprocessed=False)
def test_audio_datamodule_prepare_unprocessed_downloaded(fs, mocker):
    """If the raw dataset already exists, skip the download but still preprocess."""
    download_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.download_full_dataset')
    preprocess_mock = mocker.patch(f'{TESTED_MODULE}.AudioDataModule.preprocess_dataset')
    data = AudioDataModule()
    fs.create_dir(data.data_dir_unprocessed)
    data.prepare_data(use_preprocessed=False)
    download_mock.assert_not_called()
    preprocess_mock.assert_called_once()
def test_audio_datamodule_prepare_unprocessed_with_downloaded(fs, mocker):
    """Without raw data on disk, the full dataset is downloaded and preprocessed."""
    download_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.download_full_dataset')
    preprocess_mock = mocker.patch(f'{TESTED_MODULE}.AudioDataModule.preprocess_dataset')
    data = AudioDataModule()
    data.prepare_data(use_preprocessed=False)
    download_mock.assert_called_once_with(
        data.url, data.bucket, data.meta_file, data.data_dir_unprocessed
    )
    preprocess_mock.assert_called_once()
def unprocessed_metadata(filename):
    """Stand-in for ``json.load`` returning raw (unprocessed) dataset metadata.

    Args:
        filename: the open file handle passed to ``json.load``; its ``.name``
            must point at the unprocessed metadata file.

    Raises:
        FileNotFoundError: if the handle does not point at the expected file,
            mimicking a missing metadata file.
    """
    data = AudioDataModule()
    expected_filename = Path(data.data_dir_unprocessed).joinpath(data.meta_file)
    # Bug fix: `filename.name` is a str and PurePath.__eq__ never compares
    # equal to a str, so the original `filename.name != expected_filename`
    # was always True. Normalize to Path before comparing.
    if Path(filename.name) != expected_filename:
        raise FileNotFoundError(filename.name)
    metadata = {
        'sample_group_1': {'type': 'cool-sounds', 'folders': ['folder1', 'folder2']},
        'sample_group_2': {'type': 'even-cooler-sounds', 'folders': ['folder3']},
    }
    return metadata
def create_fake_dataset(metadata: dict, num_files: int, fakefs):
    """Populate the fake filesystem with `num_files` wav files per metadata folder."""
    data = AudioDataModule()
    root = Path(data.data_dir_unprocessed)
    for group in metadata.values():
        for folder in group['folders']:
            for index in range(num_files):
                fakefs.create_file(root / folder / f'file_{index}.wav')
def expected_hashed_ouput(filename: str, audio_dir: str):
    """Return the hashed output path the preprocessor is expected to produce.

    The hash is derived from the input path with its first component stripped.
    """
    relative = Path(*Path(filename).parts[1:])
    output_hash = data_utils.str2int(str(relative))
    return Path(audio_dir) / f'{output_hash}.wav'
def test_audio_dataset_preprocess(fakefs, mocker):
    """
    End-to-end check of preprocess_dataset: every raw file must be passed to
    the audio preprocessor with its hashed output path, and the resulting
    metadata must be dumped to the processed meta file.
    """
    data = AudioDataModule()
    meta_file = Path(data.data_dir_unprocessed).joinpath(data.meta_file)
    fakefs.create_file(meta_file)
    # Serve fake raw metadata instead of parsing the (empty) fake file.
    mocker.patch('json.load', side_effect=unprocessed_metadata)
    with open(meta_file, 'r') as f:
        metadata = unprocessed_metadata(f)

    num_files = 10
    create_fake_dataset(metadata, num_files, fakefs)
    mocked_preprocess = mocker.patch(f'{TESTED_MODULE}.audio_utils.preprocess_audio_file')
    mocked_jsondump = mocker.patch('json.dump')
    data.preprocess_dataset()

    # Rebuild the expected per-file metadata and preprocess call arguments.
    expected_metadata = {}
    expected_input = {}
    expected_outfile = {}
    for key, group in metadata.items():
        group_type = group['type']
        for i in range(num_files):
            pass  # placeholder removed below
    for key, group in metadata.items():
        group_type = group['type']
        for folder in group['folders']:
            for i in range(num_files):
                in_file = Path(data.data_dir_unprocessed).joinpath(folder).joinpath(f'file_{i}.wav')
                out_file = expected_hashed_ouput(in_file, Path(data.data_dir).joinpath('audio'))
                hash_key = int(out_file.stem)
                expected_metadata[hash_key] = {
                    'filename': str(out_file.relative_to(data.data_dir)),
                    'type': group_type,
                    'sample_pack_key': key,
                }
                expected_input[hash_key] = in_file
                expected_outfile[hash_key] = out_file

    expected_calls = [
        mock.call(expected_input[key], expected_outfile[key], data.sample_rate, data.num_samples)
        for key in expected_input
    ]
    assert mocked_preprocess.call_args_list == expected_calls

    # json.dump must receive the expected metadata and the processed meta file.
    json_call = mocked_jsondump.call_args_list[0]
    output_metadata = json_call[0][0]
    for key, value in output_metadata.items():
        assert value == expected_metadata[key]
    # Bug fix: a file handle's `.name` is a str, and a str never compares
    # equal to a Path object -- compare against str(...) instead.
    assert json_call[0][1].name == str(Path(data.data_dir).joinpath(data.meta_file))
def test_audio_dataset_archive(mocker):
    """archive_dataset delegates to data_utils.create_tarfile with the data dir."""
    archive_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.create_tarfile')
    data = AudioDataModule()
    data.archive_dataset('test.tar.gz')
    archive_mock.assert_called_once_with('test.tar.gz', data.data_dir)
def processed_metadata(filename):
    """Stand-in for ``json.load`` returning processed metadata for 100 kicks.

    Args:
        filename: the open file handle passed to ``json.load``; its ``.name``
            must point at the processed metadata file.

    Raises:
        FileNotFoundError: if the handle does not point at the expected file.
    """
    data = AudioDataModule()
    expected_filename = Path(data.data_dir).joinpath(data.meta_file)
    # Bug fix: `filename.name` is a str and never compares equal to a Path,
    # so the original guard always raised. Normalize to Path first.
    if Path(filename.name) != expected_filename:
        raise FileNotFoundError(filename.name)
    metadata = {}
    for i in range(100):
        metadata[i] = {'filename': f'kick_{i}.wav', 'sample_pack_key': 'pack_a', 'type': 'electro'}
    return metadata
@pytest.fixture
def kick_datamodule(fs, mocker):
    """AudioDataModule backed by a fake FS with 100-item mocked metadata."""
    data = AudioDataModule()
    fs.create_dir(data.data_dir)
    fs.create_file(Path(data.data_dir) / data.meta_file)
    mocker.patch('drumblender.data.audio.json.load', side_effect=processed_metadata)
    return AudioDataModule()
def test_audio_datamodule_setup_train(kick_datamodule):
    """'fit' setup creates train/val datasets but no test dataset."""
    kick_datamodule.setup('fit')
    assert len(kick_datamodule.train_dataset) == 80
    assert len(kick_datamodule.val_dataset) == 10
    with pytest.raises(AttributeError):
        getattr(kick_datamodule, 'test_dataset')
def test_audio_datamodule_setup_val(kick_datamodule):
    """'validate' setup creates only the validation dataset."""
    kick_datamodule.setup('validate')
    assert len(kick_datamodule.val_dataset) == 10
    for missing in ('train_dataset', 'test_dataset'):
        with pytest.raises(AttributeError):
            getattr(kick_datamodule, missing)
def test_audio_datamodule_setup_test(kick_datamodule):
    """'test' setup creates only the test dataset."""
    kick_datamodule.setup('test')
    assert len(kick_datamodule.test_dataset) == 10
    for missing in ('train_dataset', 'val_dataset'):
        with pytest.raises(AttributeError):
            getattr(kick_datamodule, missing)
def test_audio_datamodule_train_data(kick_datamodule, mocker):
    """The train dataloader yields batches shaped (batch, 1, num_samples)."""
    kick_datamodule.setup('fit')
    loader = kick_datamodule.train_dataloader()
    assert isinstance(loader, DataLoader)
    # Replace audio loading with deterministic-shaped random audio.
    mocker.patch(
        f'{TESTED_MODULE}.torchaudio.load',
        return_value=(torch.rand(1, kick_datamodule.num_samples), kick_datamodule.sample_rate),
    )
    batch = next(iter(loader))
    assert batch[0].shape == (kick_datamodule.batch_size, 1, kick_datamodule.num_samples)
def test_modal_datamodule_init():
    """ModalDataModule constructs and is a subclass of AudioDataModule."""
    data = ModalDataModule()
    for klass in (ModalDataModule, AudioDataModule):
        assert isinstance(data, klass)
def test_modal_datamodule_prepare_download_archive(fs, mocker):
    """With nothing on disk, the modal module downloads and extracts its archive."""
    download_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    extract_mock = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    data = ModalDataModule()
    data.prepare_data()
    download_mock.assert_called_once_with(data.archive, data.url, data.bucket)
    extract_mock.assert_called_once_with(data.archive, data.data_dir)
def test_modal_datamodule_prepare_datadir_exists(fs, mocker):
    """An existing modal data dir means no download and no extraction."""
    download_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    extract_mock = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    data = ModalDataModule()
    fs.create_dir(data.data_dir)
    data.prepare_data()
    download_mock.assert_not_called()
    extract_mock.assert_not_called()
def test_modal_datamodule_prepare_archive_exists(fs, mocker):
    """A modal archive on disk is extracted without re-downloading."""
    download_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    extract_mock = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    data = ModalDataModule()
    fs.create_file(data.archive)
    data.prepare_data()
    download_mock.assert_not_called()
    extract_mock.assert_called_once_with(data.archive, data.data_dir)
def test_modal_datamodule_prepare_unprocessed_raise(fs, mocker):
    """Raw preprocessing over an existing processed modal dir must fail."""
    datamodule = ModalDataModule()
    fs.create_dir(datamodule.data_dir)
    with pytest.raises(RuntimeError):
        datamodule.prepare_data(use_preprocessed=False)
def test_modal_datamodule_prepare_unprocessed_downloaded(fs, mocker):
    """If raw modal data already exists, skip the download but still preprocess."""
    download_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.download_full_dataset')
    preprocess_mock = mocker.patch(f'{TESTED_MODULE}.ModalDataModule.preprocess_dataset')
    data = ModalDataModule()
    fs.create_dir(data.data_dir_unprocessed)
    data.prepare_data(use_preprocessed=False)
    download_mock.assert_not_called()
    preprocess_mock.assert_called_once()
def mock_modal_audio_load(filename, sample_rate, num_samples):
    """torchaudio.load replacement that validates the path and returns noise."""
    parts = Path(filename).parts
    assert parts[0] == 'dataset'
    assert parts[-1].endswith('.wav')
    return torch.rand(1, num_samples), sample_rate
def mock_cqt_call(x, num_samples, num_frames, num_bins):
    """CQT analysis replacement returning random modal parameter tensors."""
    assert x.shape == (1, num_samples)
    shape = (1, num_frames, num_bins)
    return torch.rand(shape), torch.rand(shape), torch.rand(shape)
def processed_modal_metadata(filename):
    """Stand-in for ``json.load`` returning processed modal metadata (100 kicks).

    Args:
        filename: the open file handle passed to ``json.load``; its ``.name``
            must point at the processed metadata file.

    Raises:
        FileNotFoundError: if the handle does not point at the expected file.
    """
    data = ModalDataModule()
    expected_filename = Path(data.data_dir).joinpath(data.meta_file)
    # Bug fix: `filename.name` is a str and never compares equal to a Path,
    # so the original guard always raised. Normalize to Path first.
    if Path(filename.name) != expected_filename:
        raise FileNotFoundError(filename.name)
    metadata = {}
    for i in range(100):
        metadata[i] = {
            'filename': f'kick_{i}.wav',
            'filename_modal': f'kick_{i}_modal.wav',
            'features': f'kick_{i}.pt',
            'sample_pack_key': 'pack_a',
            'type': 'electro',
        }
    return metadata
def mock_json_dump_update(metadata, outfile, expected_outfile):
    """json.dump replacement asserting metadata is written to the expected file.

    Bug fix: ``outfile.name`` is a str while the bound ``expected_outfile``
    is a pathlib.Path; a str never compares equal to a Path, so the original
    assertion could never pass. Normalize both sides to Path.
    """
    assert Path(outfile.name) == Path(expected_outfile)
def run_preprocess_test(data, fakefs, mocker):
    """
    Make sure that the modal preprocessing is calling all the right
    methods with the expected inputs and outputs. This involves mocking
    several methods.
    """
    fakefs.create_dir(data.data_dir)
    fakefs.create_file(Path(data.data_dir).joinpath(data.meta_file))
    # Serve fake processed metadata instead of parsing the (empty) fake file.
    mocker.patch('json.load', side_effect=processed_modal_metadata)
    # Parent-class preprocessing is out of scope here; only verify it runs.
    mocked_preprocess = mocker.patch(f'{TESTED_MODULE}.AudioDataModule.preprocess_dataset')
    mocked_load = mocker.patch(f'{TESTED_MODULE}.torchaudio.load', side_effect=partial(mock_modal_audio_load, sample_rate=data.sample_rate, num_samples=data.num_samples))
    # Number of analysis frames the CQT stub should emit per file.
    num_hops = ((data.num_samples // data.hop_length) + 1)
    # Stub out CQT analysis entirely; __init__ is patched so no real
    # transform object is ever constructed.
    mocker.patch.object(CQTModalAnalysis, '__init__', return_value=None)
    mocker.patch.object(CQTModalAnalysis, '__call__', side_effect=partial(mock_cqt_call, num_samples=data.num_samples, num_frames=num_hops, num_bins=data.n_bins))
    mocked_save = mocker.patch(f'{TESTED_MODULE}.torch.save')
    mocked_jsondump = mocker.patch(f'{TESTED_MODULE}.json.dump', side_effect=partial(mock_json_dump_update, expected_outfile=Path(data.data_dir).joinpath(data.meta_file)))
    data.preprocess_dataset()
    mocked_preprocess.assert_called_once()
    # NOTE(review): `filenames` is never populated below, so the
    # mocked_save.assert_has_calls(...) check at the bottom is vacuous
    # (an empty expected-call list always passes). Presumably it should
    # collect the metadata filenames -- confirm the intended expectation
    # before tightening this assertion.
    filenames = []
    load_calls = []
    with open(Path(data.data_dir).joinpath(data.meta_file), 'r') as f:
        metadata = processed_modal_metadata(f)
    for idx in metadata:
        filename = Path(data.data_dir).joinpath(metadata[idx]['filename'])
        load_calls.append(mocker.call(filename))
    mocked_load.assert_has_calls(load_calls)
    feature_dir = Path(data.data_dir).joinpath('features')
    mocked_save.assert_has_calls([mocker.call(mocker.ANY, feature_dir.joinpath(Path(f).with_suffix('.pt'))) for f in filenames])
    mocked_jsondump.assert_called_once()
def test_modal_dataset_preprocess_no_save_audio(fakefs, mocker):
    """Modal preprocessing runs correctly without saving reconstructed audio."""
    data = ModalDataModule(
        sample_rate=16000,
        num_samples=16000,
        n_bins=64,
        hop_length=256,
        save_modal_audio=False,
    )
    run_preprocess_test(data, fakefs, mocker)
def test_modal_dataset_preprocess_save_audio(fakefs, mocker):
    """With save_modal_audio, each of the 100 files is synthesized and saved."""
    data = ModalDataModule(
        sample_rate=16000,
        num_samples=16000,
        n_bins=64,
        hop_length=256,
        save_modal_audio=True,
    )
    synth_mock = mocker.patch(f'{TESTED_MODULE}.modal_synth', return_value=torch.rand(1, data.num_samples))
    save_mock = mocker.patch(f'{TESTED_MODULE}.torchaudio.save')
    run_preprocess_test(data, fakefs, mocker)
    assert synth_mock.call_count == 100
    assert save_mock.call_count == 100
def kick_modal_datamodule(fs, mocker, **kwargs):
    """Build a ModalDataModule over a fake FS with mocked metadata loading."""
    data = ModalDataModule(**kwargs)
    fs.create_dir(data.data_dir)
    fs.create_file(Path(data.data_dir) / data.meta_file)
    mocker.patch('drumblender.data.audio.json.load', side_effect=processed_modal_metadata)
    return ModalDataModule(**kwargs)