code stringlengths 17 6.64M |
|---|
def test_audio_datamodule_train_data(kick_datamodule, mocker):
    """The train dataloader yields audio batches of the expected shape."""
    kick_datamodule.setup('fit')
    train_loader = kick_datamodule.train_dataloader()
    assert isinstance(train_loader, DataLoader)
    # Patch audio loading so iterating the loader never touches the disk.
    # Bug fix: the patch result was previously bound to the name `mocker`,
    # shadowing the pytest-mock fixture for the rest of the test.
    mocker.patch(
        f'{TESTED_MODULE}.torchaudio.load',
        return_value=(torch.rand(1, kick_datamodule.num_samples), kick_datamodule.sample_rate),
    )
    batch = next(iter(train_loader))
    assert batch[0].shape == (kick_datamodule.batch_size, 1, kick_datamodule.num_samples)
|
def test_modal_datamodule_init():
    """ModalDataModule constructs and is an AudioDataModule subclass."""
    module = ModalDataModule()
    for expected_type in (ModalDataModule, AudioDataModule):
        assert isinstance(module, expected_type)
|
def test_modal_datamodule_prepare_download_archive(fs, mocker):
    """prepare_data downloads and extracts when nothing exists on disk."""
    download = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    extract = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    module = ModalDataModule()
    module.prepare_data()
    download.assert_called_once_with(module.archive, module.url, module.bucket)
    extract.assert_called_once_with(module.archive, module.data_dir)
|
def test_modal_datamodule_prepare_datadir_exists(fs, mocker):
    """prepare_data is a no-op when the data directory already exists."""
    download = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    extract = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    module = ModalDataModule()
    fs.create_dir(module.data_dir)
    module.prepare_data()
    download.assert_not_called()
    extract.assert_not_called()
|
def test_modal_datamodule_prepare_archive_exists(fs, mocker):
    """When the archive is already present, only extraction runs."""
    download = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
    extract = mocker.patch(f'{TESTED_MODULE}.extract_archive')
    module = ModalDataModule()
    fs.create_file(module.archive)
    module.prepare_data()
    download.assert_not_called()
    extract.assert_called_once_with(module.archive, module.data_dir)
|
def test_modal_datamodule_prepare_unprocessed_raise(fs, mocker):
    """Requesting raw data when no unprocessed dataset exists raises."""
    module = ModalDataModule()
    fs.create_dir(module.data_dir)
    with pytest.raises(RuntimeError):
        module.prepare_data(use_preprocessed=False)
|
def test_modal_datamodule_prepare_unprocessed_downloaded(fs, mocker):
    """If the raw dataset dir exists, skip downloading and just preprocess."""
    download = mocker.patch(f'{TESTED_MODULE}.data_utils.download_full_dataset')
    preprocess = mocker.patch(f'{TESTED_MODULE}.ModalDataModule.preprocess_dataset')
    module = ModalDataModule()
    fs.create_dir(module.data_dir_unprocessed)
    module.prepare_data(use_preprocessed=False)
    download.assert_not_called()
    preprocess.assert_called_once()
|
def mock_modal_audio_load(filename, sample_rate, num_samples):
    """Stand-in for torchaudio.load: validate the requested path and return
    random mono audio of the configured length with the given sample rate."""
    parts = Path(filename).parts
    assert parts[0] == 'dataset'
    assert parts[-1].endswith('.wav')
    return torch.rand(1, num_samples), sample_rate
|
def mock_cqt_call(x, num_samples, num_frames, num_bins):
    """Stand-in for CQTModalAnalysis.__call__: check the input audio shape
    and emit random (freqs, amps, phases) mode tracks."""
    assert x.shape == (1, num_samples)
    track_shape = (1, num_frames, num_bins)
    return torch.rand(*track_shape), torch.rand(*track_shape), torch.rand(*track_shape)
|
def processed_modal_metadata(filename):
    """Stand-in for ``json.load``: verify the opened file is the expected
    metadata file, then return a fake metadata dict with 100 entries.

    Args:
        filename: the open file object handed to ``json.load`` (its ``.name``
            attribute holds the path as a plain string).

    Raises:
        FileNotFoundError: if the opened file is not the metadata file.
    """
    data = ModalDataModule()
    expected_filename = Path(data.data_dir).joinpath(data.meta_file)
    # Bug fix: the original compared the str ``filename.name`` directly to a
    # Path object; str never equals Path, so the check always raised.
    # Normalize to Path before comparing.
    if Path(filename.name) != expected_filename:
        raise FileNotFoundError(filename.name)
    return {
        i: {
            'filename': f'kick_{i}.wav',
            'filename_modal': f'kick_{i}_modal.wav',
            'features': f'kick_{i}.pt',
            'sample_pack_key': 'pack_a',
            'type': 'electro',
        }
        for i in range(100)
    }
|
def mock_json_dump_update(metadata, outfile, expected_outfile):
    """Stand-in for ``json.dump``: only checks that the metadata is written
    to the expected output file."""
    actual_path = outfile.name
    assert actual_path == expected_outfile
|
def run_preprocess_test(data, fakefs, mocker):
    """Shared driver for the modal preprocessing tests.

    Runs ``data.preprocess_dataset()`` with every external collaborator
    (filesystem, metadata loading, audio loading, CQT analysis, tensor
    saving, metadata dumping) mocked, then verifies each mock was called
    with the expected inputs and outputs.

    Args:
        data: a constructed ModalDataModule.
        fakefs: the pyfakefs ``fs`` fixture.
        mocker: the pytest-mock fixture.
    """
    # Fake data dir with an (empty) metadata file; json.load is patched so
    # the file contents never matter.
    fakefs.create_dir(data.data_dir)
    fakefs.create_file(Path(data.data_dir).joinpath(data.meta_file))
    mocker.patch('json.load', side_effect=processed_modal_metadata)
    # Parent-class preprocessing should be delegated to exactly once.
    mocked_preprocess = mocker.patch(f'{TESTED_MODULE}.AudioDataModule.preprocess_dataset')
    # Audio loading returns deterministic-shaped random audio and validates
    # the requested path (see mock_modal_audio_load).
    mocked_load = mocker.patch(f'{TESTED_MODULE}.torchaudio.load', side_effect=partial(mock_modal_audio_load, sample_rate=data.sample_rate, num_samples=data.num_samples))
    num_hops = ((data.num_samples // data.hop_length) + 1)
    # Stub the CQT analysis: no-op __init__ plus a __call__ that checks its
    # input shape and emits random (freqs, amps, phases) tracks.
    mocker.patch.object(CQTModalAnalysis, '__init__', return_value=None)
    mocker.patch.object(CQTModalAnalysis, '__call__', side_effect=partial(mock_cqt_call, num_samples=data.num_samples, num_frames=num_hops, num_bins=data.n_bins))
    mocked_save = mocker.patch(f'{TESTED_MODULE}.torch.save')
    mocked_jsondump = mocker.patch(f'{TESTED_MODULE}.json.dump', side_effect=partial(mock_json_dump_update, expected_outfile=Path(data.data_dir).joinpath(data.meta_file)))
    data.preprocess_dataset()
    mocked_preprocess.assert_called_once()
    # Rebuild the expected torchaudio.load call list from the fake metadata.
    filenames = []
    load_calls = []
    with open(Path(data.data_dir).joinpath(data.meta_file), 'r') as f:
        # NOTE(review): passes the open file object, whose `.name` is a str,
        # while processed_modal_metadata compares against a Path — verify the
        # comparison inside that helper actually matches.
        metadata = processed_modal_metadata(f)
        for idx in metadata:
            filename = Path(data.data_dir).joinpath(metadata[idx]['filename'])
            load_calls.append(mocker.call(filename))
    mocked_load.assert_has_calls(load_calls)
    feature_dir = Path(data.data_dir).joinpath('features')
    # NOTE(review): `filenames` is never populated, so this assertion is
    # vacuous (assert_has_calls([]) always passes) — likely a latent bug.
    mocked_save.assert_has_calls([mocker.call(mocker.ANY, feature_dir.joinpath(Path(f).with_suffix('.pt'))) for f in filenames])
    mocked_jsondump.assert_called_once()
|
def test_modal_dataset_preprocess_no_save_audio(fakefs, mocker):
    """Preprocessing pipeline wiring with modal-audio rendering disabled."""
    module = ModalDataModule(
        sample_rate=16000,
        num_samples=16000,
        n_bins=64,
        hop_length=256,
        save_modal_audio=False,
    )
    run_preprocess_test(module, fakefs, mocker)
|
def test_modal_dataset_preprocess_save_audio(fakefs, mocker):
    """Preprocessing wiring with modal-audio rendering enabled: each of the
    100 fake metadata entries should be synthesized and written out."""
    module = ModalDataModule(
        sample_rate=16000,
        num_samples=16000,
        n_bins=64,
        hop_length=256,
        save_modal_audio=True,
    )
    synth = mocker.patch(f'{TESTED_MODULE}.modal_synth', return_value=torch.rand(1, module.num_samples))
    save = mocker.patch(f'{TESTED_MODULE}.torchaudio.save')
    run_preprocess_test(module, fakefs, mocker)
    assert synth.call_count == 100
    assert save.call_count == 100
|
def kick_modal_datamodule(fs, mocker, **kwargs):
    """Build a ModalDataModule backed by a fake filesystem and fake metadata.

    A throwaway instance is constructed first, only to resolve the data
    directory and metadata file paths to create on the fake filesystem.
    """
    probe = ModalDataModule(**kwargs)
    fs.create_dir(probe.data_dir)
    fs.create_file(Path(probe.data_dir).joinpath(probe.meta_file))
    mocker.patch('drumblender.data.audio.json.load', side_effect=processed_modal_metadata)
    return ModalDataModule(**kwargs)
|
def test_modal_datamodule_setup_train(fs, mocker):
    """The 'fit' stage creates 80/10 train/val splits but no test split."""
    dm = kick_modal_datamodule(fs, mocker)
    dm.setup('fit')
    assert len(dm.train_dataset) == 80
    assert len(dm.val_dataset) == 10
    with pytest.raises(AttributeError):
        _ = dm.test_dataset
|
def test_modal_datamodule_setup_val(fs, mocker):
    """The 'validate' stage creates only the validation split."""
    dm = kick_modal_datamodule(fs, mocker)
    dm.setup('validate')
    assert len(dm.val_dataset) == 10
    for missing in ('test_dataset', 'train_dataset'):
        with pytest.raises(AttributeError):
            getattr(dm, missing)
|
def test_modal_datamodule_setup_test(fs, mocker):
    """The 'test' stage creates only the test split."""
    dm = kick_modal_datamodule(fs, mocker)
    dm.setup('test')
    assert len(dm.test_dataset) == 10
    for missing in ('val_dataset', 'train_dataset'):
        with pytest.raises(AttributeError):
            getattr(dm, missing)
|
def test_modal_datamodule_train_data(fs, mocker):
    """The train dataloader yields correctly shaped audio batches."""
    dm = kick_modal_datamodule(fs, mocker)
    dm.setup('fit')
    loader = dm.train_dataloader()
    assert isinstance(loader, DataLoader)
    # Audio loading is patched so iteration never touches the filesystem.
    mocker.patch(
        f'{TESTED_MODULE}.torchaudio.load',
        return_value=(torch.rand(1, dm.num_samples), dm.sample_rate),
    )
    (audio_batch,) = next(iter(loader))
    assert audio_batch.shape == (dm.batch_size, 1, dm.num_samples)
|
def test_modal_datamodule_audio_param_dataset_train(fs, mocker):
    """AudioWithParametersDataset batches pair audio with feature tensors,
    loading one audio file and one feature file per item."""
    dm = kick_modal_datamodule(
        fs,
        mocker,
        batch_size=8,
        dataset_class=AudioWithParametersDataset,
        dataset_kwargs={'parameter_key': 'features'},
    )
    dm.setup('fit')
    loader = dm.train_dataloader()
    assert isinstance(loader, DataLoader)
    audio_load = mocker.patch(
        f'{TESTED_MODULE}.torchaudio.load',
        return_value=(torch.rand(1, dm.num_samples), dm.sample_rate),
    )
    feature_load = mocker.patch(f'{TESTED_MODULE}.torch.load', return_value=torch.rand(3, 4, 10))
    audio_batch, parameters = next(iter(loader))
    assert audio_batch.shape == (dm.batch_size, 1, dm.num_samples)
    assert parameters.shape == (dm.batch_size, 3, 4, 10)
    assert audio_load.call_count == dm.batch_size
    assert feature_load.call_count == dm.batch_size
|
def test_film_correctly_forwards_input():
    """FiLM preserves input shape and its output depends on the embedding."""
    batch, channels, length, emb_size = 11, 13, 31, 7
    film = FiLM(emb_size, channels)
    x = torch.testing.make_tensor(batch, channels, length, device='cpu', dtype=torch.float32)
    embedding = torch.testing.make_tensor(batch, emb_size, device='cpu', dtype=torch.float32, requires_grad=True)
    out = film(x, embedding)
    assert out.shape == (batch, channels, length)
    # Nonzero gradient everywhere w.r.t. the embedding: the conditioning
    # signal is actually used.
    (grad,) = torch.autograd.grad(out.sum().square(), embedding)
    assert (grad.abs() > 0.0).all()
|
def test_film_can_toggle_batch_norm(mocker):
    """use_batch_norm controls whether BatchNorm1d is built and invoked."""
    init_spy = mocker.spy(torch.nn.BatchNorm1d, '__init__')
    forward_spy = mocker.spy(torch.nn.BatchNorm1d, 'forward')
    batch, channels, length, emb_size = 7, 13, 37, 5
    x = torch.testing.make_tensor(batch, channels, length, device='cpu', dtype=torch.float32)
    embedding = torch.testing.make_tensor(batch, emb_size, device='cpu', dtype=torch.float32)
    # Enabled: construction and forward each happen exactly once.
    FiLM(emb_size, channels, use_batch_norm=True)(x, embedding)
    assert init_spy.call_count == 1
    assert forward_spy.call_count == 1
    # Disabled: the spy counts must not move.
    FiLM(emb_size, channels, use_batch_norm=False)(x, embedding)
    assert init_spy.call_count == 1
    assert forward_spy.call_count == 1
|
def test_gated_activation_correctly_forwards_input():
    """Output has half the channels of the concatenated input."""
    batch, channels, length = 11, 17, 23
    activation = GatedActivation()
    x = torch.testing.make_tensor(batch, 2 * channels, length, device='cpu', dtype=torch.float32)
    assert activation(x).shape == (batch, channels, length)
|
def test_gated_activation_gates_input():
    """A hugely negative second half drives the gated output to zero."""
    batch, channels, length = 11, 17, 23
    activation = GatedActivation()
    first_half = torch.testing.make_tensor(batch, channels, length, device='cpu', dtype=torch.float32)
    # low == high pins every element to the same large negative value.
    second_half = torch.testing.make_tensor(
        batch, channels, length, device='cpu', dtype=torch.float32,
        low=(- 100000000.0), high=(- 100000000.0),
    )
    out = activation(torch.cat([first_half, second_half], dim=1))
    assert out.shape == (batch, channels, length)
    assert out.abs().sum() == 0.0
|
def test_tfilm_correctly_forwards_input():
    """TFiLM is shape-preserving when seq_len is a multiple of block_size."""
    batch, channels, block = 3, 11, 16
    length = block * 10
    layer = TFiLM(channels=channels, block_size=block)
    x = torch.testing.make_tensor(batch, channels, length, device='cpu', dtype=torch.float32)
    assert layer(x).shape == (batch, channels, length)
|
def test_dummy_parameter_encoder_can_be_instantiated():
    """Smoke test: construction succeeds."""
    assert DummyParameterEncoder((1, 1)) is not None
|
def test_dummy_parameter_encoder_can_forward():
    """Forward returns a trainable tensor of the requested shape."""
    encoder = DummyParameterEncoder((1, 1))
    output, _ = encoder(torch.rand(1, 1))
    assert output.shape == (1, 1)
    assert output.requires_grad
|
def test_modal_amp_parameters_can_forward():
    """Forward preserves the modal parameter tensor shape even when the
    module is sized for more modes than the input carries."""
    batch, n_params, n_modes, n_steps = 7, 3, 45, 400
    modal_params = torch.rand(batch, n_params, n_modes, n_steps)
    model = ModalAmpParameters(n_modes + 10)
    output, _ = model(None, modal_params)
    assert output.shape == (batch, n_params, n_modes, n_steps)
|
def test_soundstream_attention_encoder_forwards(mocker):
    """The encoder maps (batch, 1, time) audio to (batch, out_channels)."""
    batch = 3
    input_channels, hidden_channels, output_channels = 1, 2, 12
    encoder = SoundStreamAttentionEncoder(input_channels, hidden_channels, output_channels)
    result = encoder(torch.rand(batch, 1, 512))
    assert result.shape == (batch, output_channels)
|
@pytest.fixture
def noise_gen():
    """A NoiseGenerator with a 512-sample window, shared across tests."""
    generator = NoiseGenerator(window_size=512)
    return generator
|
def test_noise_generator_produces_correct_output_size(noise_gen):
    """Output length follows overlap-add arithmetic for the 512 window
    (hop of 256 samples)."""
    hop_size = 256
    batch, n_frames, n_filters = 16, 690, 120
    filter_frames = torch.rand(batch, n_frames, n_filters)
    audio = noise_gen(filter_frames)
    expected_length = hop_size * (n_frames - 1) + hop_size * 2
    assert audio.shape == (batch, expected_length)
|
class FakeLogger(pl.loggers.WandbLogger):
    """Minimal WandbLogger double that routes ``experiment.log`` to a stub.

    ``__init__`` deliberately skips ``super().__init__()`` so no real wandb
    run is ever created.
    """
    def __init__(self, stub):
        # Callable that records log(...) invocations (e.g. a mocker.stub()).
        self.stub = stub
    def __getattr__(self, name: str) -> Any:
        # Pretend to be our own `experiment` so code calling
        # `logger.experiment.log(...)` lands in `self.log` below.
        # (Only invoked when normal attribute lookup fails.)
        if (name == 'experiment'):
            return self
        else:
            # Defer everything else to the parent's __getattr__.
            return super().__getattr__(name)
    def log(self, *args, **kwargs):
        # Forward to the stub so tests can assert on logged payloads.
        self.stub(*args, **kwargs)
|
class FakeModule(pl.LightningModule):
    """Identity LightningModule whose ``logger`` attribute is overridden.

    ``__getattribute__`` intercepts every attribute access so that ``logger``
    returns the injected fake instead of the trainer-managed property.
    """
    def __init__(self, fake_logger):
        super().__init__()
        # The logger double returned by the `logger` attribute below.
        self.fake_logger = fake_logger
    def __getattribute__(self, __name: str) -> Any:
        if (__name == 'logger'):
            return self.fake_logger
        else:
            # All other attributes resolve normally.
            return super().__getattribute__(__name)
    def forward(self, x, *args, **kwargs):
        # Identity forward: callbacks under test capture the model output.
        return x
|
def test_callback_correctly_interleaves_audio(monkeypatch, mocker):
    """LogAudioCallback gathers (output, target) samples across the epoch and
    logs a single interleaved clip via wandb Audio at epoch end."""
    sample_rate = 48000
    callback = LogAudioCallback(on_train=True, on_val=True, on_test=True, log_on_epoch_end=True, save_audio_sr=sample_rate)
    # Fake logger/module pair routes logger.experiment.log into a stub.
    log_stub = mocker.stub('logger')
    logger = FakeLogger(log_stub)
    model = FakeModule(logger)
    trainer = None
    callback.setup(trainer, model, 'fit')
    # Patch wandb Audio so we can inspect exactly what gets logged.
    FAKE_RETURN = 'fake return'
    audio_patch = mocker.patch('drumblender.callbacks.Audio')
    audio_patch.return_value = FAKE_RETURN
    # Expected layout after three steps with two batch items each:
    # per step, output (= conditioning, as FakeModule is identity) and
    # target samples interleaved across the two batch items.
    expected_output = torch.tensor([1, (- 1), 11, (- 11), 2, (- 2), 12, (- 12), 3, (- 3), 13, (- 13)]).numpy()
    for i in range(1, 4):
        fake_conditioning = torch.tensor([[[(- i)]], [[((- 10) - i)]]])
        fake_targets = torch.tensor([[[i]], [[(10 + i)]]])
        batch = (fake_targets, fake_conditioning)
        callback.on_train_batch_start(trainer, model, batch, 0)
        # Forward pass; the batch-end hook captures the model output.
        model(fake_conditioning)
        callback.on_train_batch_end(trainer, model, 0.0, batch, 0)
    callback.on_train_epoch_end(trainer, model)
    # Exactly one Audio object, built from the interleaved samples, with the
    # caption and sample rate passed through as keyword arguments.
    assert (audio_patch.call_count == 1)
    (actual_output,) = audio_patch.call_args.args
    caption = audio_patch.call_args.kwargs['caption']
    actual_sample_rate = audio_patch.call_args.kwargs['sample_rate']
    assert (actual_output == expected_output).all()
    assert (caption == 'train/audio')
    assert (actual_sample_rate == sample_rate)
    log_stub.assert_called_once_with({'train/audio': FAKE_RETURN})
|
def test_clean_wandb_cache_callback_cleans_wandb_cache(monkeypatch, mocker):
    """With every_n_epochs=2, four epochs trigger exactly two cache cleanups."""
    callback = CleanWandbCacheCallback(every_n_epochs=2, max_size_in_gb=1)

    class FakeTrainer():
        current_epoch: int = 0

    trainer = FakeTrainer()
    popen_stub = mocker.stub('subprocess.Popen')
    monkeypatch.setattr(subprocess, 'Popen', popen_stub)
    for _ in range(4):
        callback.on_train_epoch_end(trainer, None)
        trainer.current_epoch += 1
    cleanup_cmd = ['wandb', 'artifact', 'cache', 'cleanup', '1GB']
    popen_stub.assert_has_calls([mocker.call(cleanup_cmd)] * 2)
    assert popen_stub.call_count == 2
|
def test_save_config_callback_renames_correctly(mocker, fs):
    """With a wandb logger, the saved config is moved into the wandb run
    directory under the name model-config.yaml."""
    def create_config_file(*args, **kwargs):
        fs.create_file('not_experiment_dir/config.yaml')

    class FakeExperiment():
        dir = 'experiment_dir'

    class FakeLogger(pl.loggers.WandbLogger):
        def __init__(self, *args, **kwargs):
            pass

        @property
        def experiment(self):
            return FakeExperiment()

    # Patch the parent callback so only the wandb-specific behavior runs.
    mock_init = mocker.patch('drumblender.callbacks.SaveConfigCallback.__init__', return_value=None)
    mock_setup = mocker.patch('drumblender.callbacks.SaveConfigCallback.setup', side_effect=create_config_file)

    class FakeTrainer():
        logger = FakeLogger()
        log_dir = 'not_experiment_dir'

    callback = SaveConfigCallbackWanb()
    callback.setup(FakeTrainer(), None, 'fit')
    assert fs.exists('experiment_dir/model-config.yaml')
    mock_init.assert_called_once()
    mock_setup.assert_called_once()
|
def test_save_config_callback_just_calls_setup_for_non_wandb_logger(mocker, fs):
    """With a non-wandb logger the config stays where the parent wrote it."""
    def create_config_file(*args, **kwargs):
        fs.create_file('not_experiment_dir/config.yaml')

    class FakeNonWandbLogger():
        pass

    mock_init = mocker.patch('drumblender.callbacks.SaveConfigCallback.__init__', return_value=None)
    mock_setup = mocker.patch('drumblender.callbacks.SaveConfigCallback.setup', side_effect=create_config_file)

    class FakeTrainer():
        logger = FakeNonWandbLogger()
        log_dir = 'not_experiment_dir'

    callback = SaveConfigCallbackWanb()
    callback.setup(FakeTrainer(), None, 'fit')
    assert fs.exists('not_experiment_dir/config.yaml')
    mock_init.assert_called_once()
    mock_setup.assert_called_once()
|
def test_first_order_difference_loss():
    """Identical constant signals have zero first-order-difference loss."""
    criterion = loss.FirstOrderDifferenceLoss()
    signal = torch.ones(1, 1, 100)
    assert criterion(signal, torch.ones(1, 1, 100)) == 0.0
|
def test_weighted_loss_forwards():
    """Weighted combination: 2*L1 + 1*L1 of an all-ones error equals 3."""
    criterion = loss.WeightedLoss([torch.nn.L1Loss(), torch.nn.L1Loss()], weights=[2.0, 1.0])
    prediction = torch.ones(1, 1, 100)
    reference = torch.zeros(1, 1, 100)
    assert criterion(prediction, reference) == 3.0
|
def test_weighted_loss_forwards_no_weights():
    """Without weights, losses are summed with unit weight: 1 + 1 = 2."""
    criterion = loss.WeightedLoss([torch.nn.L1Loss(), torch.nn.L1Loss()])
    prediction = torch.ones(1, 1, 100)
    reference = torch.zeros(1, 1, 100)
    assert criterion(prediction, reference) == 2.0
|
def test_weighted_loss_different_weights():
    """Mismatched loss/weight list lengths are rejected."""
    with pytest.raises(AssertionError):
        loss.WeightedLoss([torch.nn.L1Loss()], weights=[2.0, 1.0])
|
def test_weighted_loss_with_jsonargparse_config(monkeypatch):
    """A WeightedLoss built from a jsonargparse config combines the patched
    losses as 2.0 * 1.0 (L1) + 0.1 * 20.0 (MSE) == 4.0."""
    monkeypatch.setattr(torch.nn.L1Loss, 'forward', (lambda self, x, y: 1.0))
    monkeypatch.setattr(torch.nn.MSELoss, 'forward', (lambda self, x, y: 20.0))
    config = 'loss:\n class_path: drumblender.loss.WeightedLoss\n init_args:\n loss_fns: \n - class_path: torch.nn.L1Loss\n init_args:\n reduction: mean\n - class_path: torch.nn.MSELoss\n init_args:\n reduction: sum\n weights: [2.0, 0.1]'
    parser = jsonargparse.ArgumentParser()
    parser.add_argument('--loss', type=torch.nn.Module)
    objs = parser.instantiate_classes(parser.parse_string(config))
    shape = (13, 4, 9, 2)
    lhs = torch.testing.make_tensor(*shape, dtype=torch.float32, device='cpu')
    rhs = torch.testing.make_tensor(*shape, dtype=torch.float32, device='cpu')
    assert objs.loss(lhs, rhs) == 4.0
|
def test_drumblender_can_be_instantiated(mocker):
    """The constructor stores the collaborators it is given."""
    synth_stub = mocker.stub('modal_synth')
    loss_stub = mocker.stub('loss_fn')
    model = DrumBlender(modal_synth=synth_stub, loss_fn=loss_stub)
    assert model is not None
    assert model.modal_synth is synth_stub
    assert model.loss_fn is loss_stub
|
def test_drumblender_can_forward_modal(mocker):
    """Modal-only forward: the output comes straight from the modal synth,
    which receives the modal parameters and the input length."""
    class FakeSynth(torch.nn.Module):
        def __init__(self, output):
            super().__init__()
            self.output = output

        def forward(self, p, length=None):
            return self.output

    expected = torch.rand(1, 1)
    synth = FakeSynth(expected)
    synth_spy = mocker.spy(synth, 'forward')
    batch, n_params, n_modes, n_steps = 7, 3, 45, 400
    audio_in = torch.rand(batch, 1, 1)
    modal_params = torch.rand(batch, n_params, n_modes, n_steps)
    model = DrumBlender(modal_synth=synth, loss_fn=mocker.stub('loss_fn'))
    result = model(audio_in, modal_params)
    assert (result == expected)
    synth_spy.assert_called_once_with(modal_params, audio_in.size(-1))
|
def test_drumblender_forwards_all(mocker):
    """Full DrumBlender forward pass: the shared encoder feeds the three
    autoencoders, their outputs drive the modal/noise/transient synths,
    the transient synth consumes modal + noise (transient_takes_noise=True),
    and the final output is the transient synth's output."""
    class FakeModule(torch.nn.Module):
        # Returns a fixed tensor regardless of input; lets tests spy wiring.
        def __init__(self, output):
            super().__init__()
            self.output = output
        def forward(self, *args):
            return self.output
    batch_size = 7
    num_samples = 1024
    num_params = 3
    num_modes = 45
    num_steps = 400
    embedding_size = 12
    latent_size = 3
    loss_fn = mocker.stub('loss_fn')
    # Shared encoder embedding.
    expected_encoder_output = torch.rand(batch_size, embedding_size)
    encoder = FakeModule(expected_encoder_output)
    encoder_spy = mocker.spy(encoder, 'forward')
    # Each autoencoder returns an (embedding, latent) pair.
    expected_modal_encoder_output = (torch.rand(batch_size, embedding_size), torch.rand(batch_size, latent_size))
    modal_encoder = FakeModule(expected_modal_encoder_output)
    modal_encoder_spy = mocker.spy(modal_encoder, 'forward')
    expected_noise_encoder_output = (torch.rand(batch_size, embedding_size), torch.rand(batch_size, latent_size))
    noise_encoder = FakeModule(expected_noise_encoder_output)
    noise_encoder_spy = mocker.spy(noise_encoder, 'forward')
    expected_transient_encoder_output = (torch.rand(batch_size, embedding_size), torch.rand(batch_size, latent_size))
    transient_encoder = FakeModule(expected_transient_encoder_output)
    transient_encoder_spy = mocker.spy(transient_encoder, 'forward')
    # Synth outputs. Note the noise synth returns (batch, samples) with no
    # channel dim — it is rearranged before the transient stage below.
    expected_modal_output = torch.rand(batch_size, 1, num_samples)
    modal_synth = FakeModule(expected_modal_output)
    modal_spy = mocker.spy(modal_synth, 'forward')
    expected_noise_output = torch.rand(batch_size, num_samples)
    noise_synth = FakeModule(expected_noise_output)
    noise_spy = mocker.spy(noise_synth, 'forward')
    expected_transient_output = torch.rand(batch_size, 1, num_samples)
    transient_synth = FakeModule(expected_transient_output)
    transient_spy = mocker.spy(transient_synth, 'forward')
    x = torch.rand(batch_size, 1, num_samples)
    p = torch.rand(batch_size, num_params, num_modes, num_steps)
    model = DrumBlender(loss_fn=loss_fn, encoder=encoder, modal_autoencoder=modal_encoder, noise_autoencoder=noise_encoder, transient_autoencoder=transient_encoder, modal_synth=modal_synth, noise_synth=noise_synth, transient_synth=transient_synth, transient_takes_noise=True)
    y = model(x, p)
    # Wiring: the encoder sees the raw audio; the autoencoders receive the
    # shared embedding (the modal one also receives the parameters p).
    encoder_spy.assert_called_once_with(x)
    modal_encoder_spy.assert_called_once_with(expected_encoder_output, p)
    noise_encoder_spy.assert_called_once_with(expected_encoder_output)
    transient_encoder_spy.assert_called_once_with(expected_encoder_output)
    # Synths receive each autoencoder's first output plus the target length.
    modal_spy.assert_called_once_with(expected_modal_encoder_output[0], x.size((- 1)))
    noise_spy.assert_called_once_with(expected_noise_encoder_output[0], x.size((- 1)))
    # Transient input: modal output plus channel-expanded noise output.
    transient_input = (expected_modal_output + rearrange(expected_noise_output, 'b t -> b () t'))
    torch.testing.assert_close(transient_spy.call_args_list[0][0][0], transient_input)
    torch.testing.assert_close(transient_spy.call_args_list[0][0][1], expected_transient_encoder_output[0])
    assert torch.all((y == expected_transient_output))
|
def preprocess_audio_file(path_factory, in_sr=16000, out_sr=16000, in_dur=1.0, out_dur=1.0, in_stereo=False, amp=1.0):
    """Generate a sine-wave test file, run ``audio_utils.preprocess_audio_file``
    on it, and return the path of the preprocessed output.

    Args:
        path_factory: pytest tmp_path_factory (or compatible) fixture.
        in_sr / out_sr: input and target sample rates.
        in_dur / out_dur: input and target durations in seconds.
        in_stereo: generate a stereo input file when True.
        amp: linear amplitude applied to the generated sine.
    """
    n = int(in_dur * in_sr)
    audio = audio_utils.generate_sine_wave(440, n, in_sr, in_stereo) * amp
    input_file = path_factory.mktemp('data') / 'test_input.wav'
    torchaudio.save(input_file, audio, in_sr)
    # Bug fix: the original asserted `(audio.shape[0] == 2) if in_stereo else 1`,
    # which is vacuously true for mono input (the bare literal 1 is truthy).
    # Check the channel count for both the stereo and mono cases.
    assert audio.shape[0] == (2 if in_stereo else 1)
    output_file = path_factory.mktemp('data') / 'test_preprocessed.wav'
    audio_utils.preprocess_audio_file(input_file=input_file, output_file=output_file, sample_rate=out_sr, num_samples=int(out_dur * out_sr))
    assert output_file.exists()
    return output_file
|
def test_preprocess_audio_file_noresample(tmp_path_factory):
    """With matching input/target rates, the output keeps the sample rate."""
    sr = 16000
    output_file = preprocess_audio_file(tmp_path_factory, in_sr=sr, out_sr=sr)
    _, sample_rate = torchaudio.load(output_file)
    assert sample_rate == sr
|
def test_preprocess_audio_file_resample(tmp_path_factory):
    """Audio is resampled to the requested target sample rate."""
    in_sr, out_sr = 16000, 48000
    output_file = preprocess_audio_file(tmp_path_factory, in_sr=in_sr, out_sr=out_sr)
    _, sample_rate = torchaudio.load(output_file)
    assert sample_rate == out_sr
|
def test_preprocess_audio_file_resample_stereo(tmp_path_factory):
    """Stereo input is resampled and mixed down to a single channel."""
    in_sr, out_sr = 16000, 48000
    output_file = preprocess_audio_file(tmp_path_factory, in_sr=in_sr, out_sr=out_sr, in_stereo=True)
    waveform, sample_rate = torchaudio.load(output_file)
    assert sample_rate == out_sr
    assert waveform.shape[0] == 1
|
def test_preprocess_audio_file_resample_pad(tmp_path_factory):
    """A shorter input is padded out to the requested target duration."""
    in_sr, out_sr = 16000, 48000
    in_dur, out_dur = 1.0, 2.0
    output_file = preprocess_audio_file(tmp_path_factory, in_sr=in_sr, out_sr=out_sr, in_dur=in_dur, out_dur=out_dur)
    waveform, sample_rate = torchaudio.load(output_file)
    assert sample_rate == out_sr
    assert waveform.shape[1] == int(out_dur * out_sr)
|
def test_preprocess_audio_file_resample_trim(tmp_path_factory):
    """A longer input is trimmed down to the requested target duration."""
    in_sr, out_sr = 16000, 48000
    in_dur, out_dur = 1.0, 0.5
    output_file = preprocess_audio_file(tmp_path_factory, in_sr=in_sr, out_sr=out_sr, in_dur=in_dur, out_dur=out_dur)
    waveform, sample_rate = torchaudio.load(output_file)
    assert sample_rate == out_sr
    assert waveform.shape[1] == int(out_dur * out_sr)
|
def test_preprocess_audio_file_raises_warning_on_quiet_sound(tmp_path_factory):
    """Nearly silent input is rejected with a descriptive ValueError."""
    with pytest.raises(ValueError, match='Entire wavfile below threshold level'):
        preprocess_audio_file(tmp_path_factory, amp=1e-06)
|
def test_first_non_silent_sample_returns_correct_sample():
    """A signal silent for its first half is detected at the midpoint."""
    signal = torch.zeros(1000)
    signal[500:] = 1.0
    onset = audio_utils.first_non_silent_sample(signal, frame_size=100, hop_size=100)
    assert onset == 500
|
def test_first_non_silent_sample_thresholding_works_correctly():
    """Samples just below the threshold count as silence; just above do not."""
    threshold_db = -20.0
    linear_threshold = np.power(10.0, threshold_db / 20.0)
    signal = torch.zeros(1000)
    signal[:500] = linear_threshold * 0.99
    signal[500:] = linear_threshold * 1.01
    onset = audio_utils.first_non_silent_sample(signal, frame_size=100, hop_size=100, threshold_db=threshold_db)
    assert onset == 500
|
def test_first_non_silent_sample_below_threshold_returns_none():
    """An entirely sub-threshold signal yields no onset at all."""
    threshold_db = -20.0
    signal = torch.ones(1000) * np.power(10.0, threshold_db / 20.0) * 0.99
    onset = audio_utils.first_non_silent_sample(signal, frame_size=100, hop_size=100, threshold_db=threshold_db)
    assert onset is None
|
def test_cut_start_silence_raises_error_for_incorrect_tensor_shape():
    """cut_start_silence rejects a 1-D tensor (expects channels-first 2-D)."""
    with pytest.raises(AssertionError):
        audio_utils.cut_start_silence(torch.zeros(1000))
|
def test_cut_start_silence_clips_tensor(mocker):
    """With mocked per-channel onsets of 200 and 100 samples, a (2, 1000)
    input is clipped to (2, 900)."""
    onset_mock = mocker.patch('drumblender.utils.audio.first_non_silent_sample')
    onset_mock.side_effect = [200, 100]
    clipped = audio_utils.cut_start_silence(torch.zeros(2, 1000))
    assert clipped.shape == (2, 900)
|
def test_cut_start_silence_raises_error_for_silent_input(mocker):
    """A fully silent signal (no onset found) raises ValueError."""
    mocker.patch('drumblender.utils.audio.first_non_silent_sample', return_value=None)
    with pytest.raises(ValueError):
        audio_utils.cut_start_silence(torch.zeros(1, 1000))
|
def test_modal_analysis_init():
    """The constructor records the sample rate."""
    sr = 48000
    analysis = modal_analysis.CQTModalAnalysis(sr)
    assert analysis.sample_rate == sr
|
def test_modal_analysis_spectrogram():
    """Spectrogram shapes: the default output carries a trailing size-2
    axis, which disappears when complex=False."""
    sr, hop, bins = 48000, 256, 64
    analysis = modal_analysis.CQTModalAnalysis(sr, hop_length=hop, n_bins=bins)
    signal = torch.randn(1, 48000)
    frames = signal.shape[1] // hop + 1
    assert analysis.spectrogram(signal).shape == (1, bins, frames, 2)
    assert analysis.spectrogram(signal, complex=False).shape == (1, bins, frames)
|
def test_modal_analysis_modal_tracking():
    """The strongest tracked mode of a pure sine lands on the excited bin."""
    sr, hop, bins = 48000, 256, 64
    analysis = modal_analysis.CQTModalAnalysis(sr, hop_length=hop, n_bins=bins)
    target_bin = 36
    sine = audio_utils.generate_sine_wave(analysis.frequencies()[target_bin], 48000, sr)
    spectrum = analysis.spectrogram(sine, complex=True)
    spectrum = spectrum[0].numpy()
    freqs, amps, phases = analysis.modal_tracking(spectrum)
    # Find the track with the largest total amplitude.
    best_energy = 0
    best_track = 0
    for track_index, track_amps in enumerate(amps):
        energy = sum(track_amps)
        if energy > best_energy:
            best_energy = energy
            best_track = track_index
    assert np.isclose(np.mean(freqs[best_track]), 36, atol=0.025)
|
def test_modal_analysis_create_modal_tensors():
    """Smoke test: construction with explicit CQT settings succeeds."""
    sr = 48000
    analysis = modal_analysis.CQTModalAnalysis(sr, hop_length=256, n_bins=64)
    assert analysis.sample_rate == sr
|
def test_modal_analysis_call():
    """Calling the analyzer on a one-second sine yields one mode track
    whose length matches the hop count."""
    sr = 16000
    sine = audio_utils.generate_sine_wave(440, num_samples=sr, sample_rate=sr)
    analysis = modal_analysis.CQTModalAnalysis(sr, hop_length=256, n_bins=60, min_length=10, num_modes=1, threshold=(-80.0))
    freqs, amps, phases = analysis(sine)
    expected_hops = sine.shape[1] // 256 + 1
    for track in (freqs, amps, phases):
        assert track.shape == (1, 1, expected_hops)
|
class LiviaSoftmax(HyperDenseNetConvLayer):
    """Final classification layer: a HyperDenseNet conv layer followed by a
    temperature-scaled softmax over the class dimension."""
    def __init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType=0, dropoutRate=0.0, softmaxTemperature=1.0):
        # Build the underlying conv layer first; it populates
        # self.outputTrain/outputTest and their shape attributes.
        HyperDenseNetConvLayer.__init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType, dropoutRate)
        self._numberOfOutputClasses = None
        self._bClassLayer = None
        self._softmaxTemperature = None
        # Number of output classes == number of conv filters in this layer.
        self._numberOfOutputClasses = filterShape[0]
        self._softmaxTemperature = softmaxTemperature
        outputOfConvTrain = self.outputTrain
        outputOfConvTest = self.outputTest
        outputOfConvShapeTrain = self.outputShapeTrain
        outputOfConvShapeTest = self.outputShapeTest
        # Per-class bias applied to the feature maps before the softmax.
        # NOTE(review): sized from self._numberOfFeatureMaps, which is set by
        # the base class — presumably equals filterShape[0]; confirm.
        b_values = np.zeros(self._numberOfFeatureMaps, dtype='float32')
        self._bClassLayer = theano.shared(value=b_values, borrow=True)
        inputToSoftmaxTrain = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTrain)
        inputToSoftmaxTest = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTest)
        # The bias is a trainable parameter of this layer.
        self.params = (self.params + [self._bClassLayer])
        # Class probabilities and hard predictions for both graphs.
        (self.p_y_given_x_train, self.y_pred_train) = applySoftMax(inputToSoftmaxTrain, outputOfConvShapeTrain, self._numberOfOutputClasses, softmaxTemperature)
        (self.p_y_given_x_test, self.y_pred_test) = applySoftMax(inputToSoftmaxTest, outputOfConvShapeTest, self._numberOfOutputClasses, softmaxTemperature)
    def negativeLogLikelihoodWeighted(self, y, weightPerClass):
        """Class-weighted negative log-likelihood over a dense label volume.

        ``weightPerClass`` is broadcast over the class axis of the 5-D
        probability tensor; ``y`` indexes the true class per voxel.
        """
        # Tiny epsilon added where probabilities underflow so log() is finite.
        e1 = np.finfo(np.float32).tiny
        addTinyProbMatrix = (T.lt(self.p_y_given_x_train, (4 * e1)) * e1)
        weights = weightPerClass.dimshuffle('x', 0, 'x', 'x', 'x')
        log_p_y_given_x_train = T.log((self.p_y_given_x_train + addTinyProbMatrix))
        weighted_log_probs = (log_p_y_given_x_train * weights)
        wShape = weighted_log_probs.shape
        # Advanced indexing: select, per voxel, the log-prob of its true class.
        idx0 = T.arange(wShape[0]).dimshuffle(0, 'x', 'x', 'x')
        idx2 = T.arange(wShape[2]).dimshuffle('x', 0, 'x', 'x')
        idx3 = T.arange(wShape[3]).dimshuffle('x', 'x', 0, 'x')
        idx4 = T.arange(wShape[4]).dimshuffle('x', 'x', 'x', 0)
        return (- T.mean(weighted_log_probs[(idx0, y, idx2, idx3, idx4)]))
    def predictionProbabilities(self):
        """Return the test-graph class probability tensor."""
        return self.p_y_given_x_test
|
def computeDice(autoSeg, groundTruth):
    """Per-class Dice coefficients between two label volumes.

    Args:
        autoSeg: predicted integer label array.
        groundTruth: reference integer label array (defines the class count).

    Returns:
        List of Dice floats in [0, 1], one per non-background class
        (classes 1..max). 1 = perfect overlap, 0 = no overlap.
    """
    n_classes = int(np.max(groundTruth) + 1)
    DiceArray = []
    # Class 0 is treated as background and skipped.
    # Bug fixes: `xrange` is Python 2 only (use `range`), and `np.bool` was
    # removed from NumPy (use the builtin `bool`).
    for c_i in range(1, n_classes):
        # Boolean masks marking voxels of the current class.
        auto_mask = (np.asarray(autoSeg).flatten() == c_i)
        gt_mask = (np.asarray(groundTruth).flatten() == c_i)
        DiceArray.append(dice(auto_mask, gt_mask))
    return DiceArray
|
def dice(im1, im2):
    """Dice coefficient between two binary masks.

    Args:
        im1, im2: array-likes; converted to boolean arrays.

    Returns:
        Dice coefficient as a float in [0, 1]; 1 = perfect overlap,
        0 = no overlap. Two empty masks are defined as overlapping (1.0).

    Raises:
        ValueError: if the inputs have different sizes.
    """
    # Bug fix: `np.bool` was deprecated and removed from NumPy; use the
    # builtin `bool` dtype instead.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if im1.size != im2.size:
        raise ValueError('Size mismatch between input arrays!!!')
    im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        # Both masks empty: define as perfect agreement.
        return 1.0
    intersection = np.logical_and(im1, im2)
    return (2.0 * intersection.sum()) / im_sum
|
def applyActivationFunction_Sigmoid(inputData):
    """Element-wise logistic sigmoid.

    inputData is a 5D tensor with shape
    (batchSize, featMaps, convolvedImageShape[0..2]).
    """
    return T.nnet.sigmoid(inputData)
|
def applyActivationFunction_Tanh(inputData):
    """Element-wise hyperbolic tangent.

    inputData is a 5D tensor with shape
    (batchSize, featMaps, convolvedImageShape[0..2]).
    """
    return T.tanh(inputData)
|
def applyActivationFunction_ReLU_v1(inputData):
    """Standard rectifier max(x, 0), applied element-wise to a 5D tensor
    (batchSize, featMaps, x, y, z)."""
    return T.maximum(0, inputData)
|
def applyActivationFunction_ReLU_v2(inputData):
    """Rectifier via switch: emit 0.0 where x < 0, pass x through elsewhere."""
    isNegative = (inputData < 0.0)
    return T.switch(isNegative, 0.0, inputData)
|
def applyActivationFunction_ReLU_v3(inputData):
    """Rectifier via abs(): (x + |x|) / 2 equals x for x >= 0 and 0 for x < 0."""
    magnitude = abs(inputData)
    return (inputData + magnitude) / 2.0
|
def applyActivationFunction_ReLU_v4(inputData):
    """Rectifier via the sign function: sgn(x)+1 is 2 for x>0, 1 at 0, 0 for
    x<0; multiplying by x and halving yields max(0, x)."""
    gate = (T.sgn(inputData) + 1)
    return (gate * inputData) * 0.5
|
def applyActivationFunction_LeakyReLU(inputData, leakiness):
    """Leaky rectifier, computed as a pos/neg blend of x and |x|.

    leakiness : float
        Slope for negative input, usually in [0, 1]. 0 gives the standard
        rectifier, 1 a linear (identity) activation, values in between a
        leaky rectifier.

    [1] Maas et al. (2013): Rectifier Nonlinearities Improve Neural Network
    Acoustic Models.

    inputData is a tensor of shape (batchSize, featMaps, xDim, yDim, zDim).
    """
    slopePos = (0.5 * (1 + leakiness))
    slopeNeg = (0.5 * (1 - leakiness))
    return (slopePos * inputData) + (slopeNeg * abs(inputData))
|
def applyActivationFunction_PReLU(inputData, PreluActivations):
    """Parametric Rectified Linear Unit:
    f(x) = x for x >= 0, alpha * x for x < 0, where alpha is a learned
    per-feature-map vector.

    inputData is a tensor of shape (batchSize, featMaps, xDim, yDim, zDim).
    """
    alphaBroadcast = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    positivePart = T.maximum(0, inputData)
    # (x - |x|) is 2x for x < 0 and 0 otherwise, hence the trailing 0.5 factor.
    negativePart = ((alphaBroadcast * (inputData - abs(inputData))) * 0.5)
    return positivePart + negativePart
|
def applyActivationFunction_PReLU_v2(inputData, PreluActivations):
    """PReLU written purely with abs(): same contract as
    applyActivationFunction_PReLU but computes the positive half as
    (x + |x|) / 2 instead of maximum(0, x).

    inputData is a 5D tensor (batchSize, featMaps, x, y, z).
    """
    alphaBroadcast = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    positivePart = ((inputData + abs(inputData)) / 2.0)
    negativePart = (alphaBroadcast * ((inputData - abs(inputData)) / 2.0))
    return positivePart + negativePart
|
def applyActivationFunction_PReLU_v3(inputData, PreluActivations):
    """PReLU in 'leaky' form: a pos/neg blend of x and |x| with per-channel
    learned slopes (cf. applyActivationFunction_LeakyReLU).

    inputData is a 5D tensor (batchSize, featMaps, x, y, z).
    """
    alphaBroadcast = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    blendPos = (0.5 * (1 + alphaBroadcast))
    blendNeg = (0.5 * (1 - alphaBroadcast))
    return (blendPos * inputData) + (blendNeg * abs(inputData))
|
def apply_Dropout(rng, dropoutRate, inputShape, inputData, task):
    """Dropout that masks during training and rescales elsewhere.

    task: 0 = training (multiply by a fresh Bernoulli keep-mask),
          1 = validation / 2 = testing (scale by the keep probability).
    Rates at or below 0.001 are treated as 'no dropout'.
    """
    if dropoutRate <= 0.001:
        return inputData
    keepProb = 1 - dropoutRate
    srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
    keepMask = srng.binomial(n=1, size=inputShape, p=keepProb, dtype=theano.config.floatX)
    if task == 0:
        return inputData * keepMask
    # Non-training passes: compensate by the expected keep probability.
    return inputData * keepProb
|
def convolveWithKernel(W, filter_shape, inputSample, inputSampleShape):
# 3-D 'valid' convolution of a 5-D input with kernel bank W via theano's
# conv3d2d. filter_shape is laid out (nKernels, k1, k2, k3, nInputChannels);
# inputSample presumably has its channel axis last as well -- axis 4 is moved
# to position 1 below for conv3d2d, then moved back. TODO(review): confirm the
# exact axis semantics (conv3d2d's documented layout) against the caller.
wReshapedForConv = W.dimshuffle(0, 4, 1, 2, 3)
wReshapedForConvShape = (filter_shape[0], filter_shape[4], filter_shape[1], filter_shape[2], filter_shape[3])
inputSampleReshaped = inputSample.dimshuffle(0, 4, 1, 2, 3)
inputSampleReshapedShape = (inputSampleShape[0], inputSampleShape[4], inputSampleShape[1], inputSampleShape[2], inputSampleShape[3])
# border_mode='valid': each output spatial dim shrinks by (kernel dim - 1).
convolved_Output = T.nnet.conv3d2d.conv3d(inputSampleReshaped, wReshapedForConv, inputSampleReshapedShape, wReshapedForConvShape, border_mode='valid')
# Undo the axis reordering applied before the convolution.
output = convolved_Output.dimshuffle(0, 2, 3, 4, 1)
# Concrete (non-symbolic) output shape: batch, nKernels, then the three
# spatial dims shrunk by the matching kernel extents.
outputShape = [inputSampleShape[0], filter_shape[0], ((inputSampleShape[2] - filter_shape[2]) + 1), ((inputSampleShape[3] - filter_shape[3]) + 1), ((inputSampleShape[4] - filter_shape[4]) + 1)]
return (output, outputShape)
|
def applyBn(numberEpochApplyRolling, inputTrain, inputTest, inputShapeTrain):
# Batch normalisation over a 5-D activation tensor (axis 1 = channels).
# The training output uses the current mini-batch mean/var; the test output
# uses a rolling average over the last `numberEpochApplyRolling` stored batch
# statistics. Returns the normalised train/test tensors plus every shared
# variable involved, so the caller can update the rolling buffers
# (sharedNewMu_B / sharedNewVar_B are presumably written back by the caller
# after each batch -- not visible here).
numberOfChannels = inputShapeTrain[1]
# Learnable per-channel scale (gamma) and shift (beta).
gBn_values = np.ones(numberOfChannels, dtype='float32')
gBn = theano.shared(value=gBn_values, borrow=True)
bBn_values = np.zeros(numberOfChannels, dtype='float32')
bBn = theano.shared(value=bBn_values, borrow=True)
# Buffers of recent per-batch means/variances used for the rolling stats.
muArray = theano.shared(np.zeros((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
varArray = theano.shared(np.ones((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
# Holders for the newest batch statistics.
sharedNewMu_B = theano.shared(np.zeros(numberOfChannels, dtype='float32'), borrow=True)
sharedNewVar_B = theano.shared(np.ones(numberOfChannels, dtype='float32'), borrow=True)
# Tiny epsilon keeps the square roots below away from zero.
e1 = np.finfo(np.float32).tiny
# Per-channel batch statistics: mean/var over batch and the 3 spatial axes.
mu_B = inputTrain.mean(axis=[0, 2, 3, 4])
mu_B = T.unbroadcast(mu_B, 0)
var_B = inputTrain.var(axis=[0, 2, 3, 4])
var_B = T.unbroadcast(var_B, 0)
var_B_plusE = (var_B + e1)
# Rolling statistics for inference; the variance gets a Bessel-style
# effectiveSize/(effectiveSize-1) correction (population -> sample variance).
mu_RollingAverage = muArray.mean(axis=0)
effectiveSize = (((inputShapeTrain[0] * inputShapeTrain[2]) * inputShapeTrain[3]) * inputShapeTrain[4])
var_RollingAverage = ((effectiveSize / (effectiveSize - 1)) * varArray.mean(axis=0))
var_RollingAverage_plusE = (var_RollingAverage + e1)
# Normalise, then apply the learnable affine transform (gamma * x + beta).
normXi_train = ((inputTrain - mu_B.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_B_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
normYi_train = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_train) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
normXi_test = ((inputTest - mu_RollingAverage.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_RollingAverage_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
normYi_test = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_test) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
return (normYi_train, normYi_test, gBn, bBn, muArray, varArray, sharedNewMu_B, sharedNewVar_B, mu_B, var_B)
|
def applySoftMax(inputSample, inputSampleShape, numClasses, softmaxTemperature):
    """Temperature-scaled softmax over the class axis of a 5-D score tensor.

    inputSample: (batch, class, x, y, z) scores; inputSampleShape is the
    matching concrete shape. Returns (p_y_given_x, y_pred) where p_y_given_x
    has the input layout and y_pred = argmax over the class axis.
    """
    # Move classes last, collapse everything else into rows of the 2-D softmax.
    channelsLast = inputSample.dimshuffle(0, 2, 3, 4, 1)
    flattened = channelsLast.flatten(1)
    voxelsPerSample = ((inputSampleShape[2] * inputSampleShape[3]) * inputSampleShape[4])
    numRows = (inputSampleShape[0] * voxelsPerSample)
    logits2d = flattened.reshape((numRows, numClasses))
    # Dividing the logits by the temperature sharpens (<1) or softens (>1) it.
    probs2d = T.nnet.softmax((logits2d / softmaxTemperature))
    # Restore the 5-D layout, classes back on axis 1.
    probs5d = probs2d.reshape((inputSampleShape[0], inputSampleShape[2], inputSampleShape[3], inputSampleShape[4], inputSampleShape[1]))
    p_y_given_x = probs5d.dimshuffle(0, 4, 1, 2, 3)
    y_pred = T.argmax(p_y_given_x, axis=1)
    return (p_y_given_x, y_pred)
|
def applyBiasToFeatureMaps(bias, featMaps):
    """Broadcast-add a per-channel bias vector onto a 5-D feature-map tensor
    (batch, featMaps, x, y, z)."""
    biasBroadcast = bias.dimshuffle('x', 0, 'x', 'x', 'x')
    return featMaps + biasBroadcast
|
class parserConfigIni(object):
    """Reads the sections of a configuration .ini file (model creation,
    training and testing parameters) and exposes them as attributes.

    Changes vs. the original: the non-standard `_self` parameter name is
    replaced with the conventional `self` (invisible to callers), and the
    0/1 -> bool conversion for batch normalisation is collapsed to a single
    expression. Every config section/key string is unchanged.
    """

    def __init__(self):
        # Overwritten by readModelCreation_params.
        self.networkName = []

    def readConfigIniFile(self, fileName, task):
        """Parse fileName according to task: 0 = create model, 1 = train, 2 = test."""
        def createModel():
            print(' --- Creating model (Reading parameters...)')
            self.readModelCreation_params(fileName)

        def trainModel():
            print(' --- Training model (Reading parameters...)')
            self.readModelTraining_params(fileName)

        def testModel():
            print(' --- Testing model (Reading parameters...)')
            self.readModelTesting_params(fileName)

        optionsParser = {0: createModel, 1: trainModel, 2: testModel}
        optionsParser[task]()

    def readModelCreation_params(self, fileName):
        """Load the network architecture and training hyper-parameters."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        self.networkName = ConfigIni.get('General', 'networkName')
        self.folderName = ConfigIni.get('General', 'folderName')
        self.n_classes = json.loads(ConfigIni.get('CNN_Architecture', 'n_classes'))
        self.layers = json.loads(ConfigIni.get('CNN_Architecture', 'numkernelsperlayer'))
        self.kernels = json.loads(ConfigIni.get('CNN_Architecture', 'kernelshapes'))
        self.intermediate_ConnectedLayers = json.loads(ConfigIni.get('CNN_Architecture', 'intermediateConnectedLayers'))
        self.pooling_scales = json.loads(ConfigIni.get('CNN_Architecture', 'pooling_scales'))
        self.dropout_Rates = json.loads(ConfigIni.get('CNN_Architecture', 'dropout_Rates'))
        self.activationType = json.loads(ConfigIni.get('CNN_Architecture', 'activationType'))
        self.weight_Initialization_CNN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_CNN'))
        self.weight_Initialization_FCN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_FCN'))
        self.weightsFolderName = ConfigIni.get('CNN_Architecture', 'weights folderName')
        self.weightsTrainedIdx = json.loads(ConfigIni.get('CNN_Architecture', 'weights trained indexes'))
        self.batch_size = json.loads(ConfigIni.get('Training Parameters', 'batch_size'))
        self.sampleSize_Train = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Train'))
        self.sampleSize_Test = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Test'))
        self.costFunction = json.loads(ConfigIni.get('Training Parameters', 'costFunction'))
        self.L1_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L1 Regularization Constant'))
        self.L2_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L2 Regularization Constant'))
        # NOTE: 'Leraning Rate' is misspelled in the config files themselves;
        # the key must stay as-is to keep reading existing .ini files.
        self.learning_rate = json.loads(ConfigIni.get('Training Parameters', 'Leraning Rate'))
        self.momentumType = json.loads(ConfigIni.get('Training Parameters', 'Momentum Type'))
        self.momentumValue = json.loads(ConfigIni.get('Training Parameters', 'Momentum Value'))
        self.momentumNormalized = json.loads(ConfigIni.get('Training Parameters', 'momentumNormalized'))
        self.optimizerType = json.loads(ConfigIni.get('Training Parameters', 'Optimizer Type'))
        self.rho_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Rho RMSProp'))
        self.epsilon_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Epsilon RMSProp'))
        # Stored in the file as 0/1; exposed as a real boolean.
        self.applyBatchNorm = (json.loads(ConfigIni.get('Training Parameters', 'applyBatchNormalization')) == 1)
        self.BatchNormEpochs = json.loads(ConfigIni.get('Training Parameters', 'BatchNormEpochs'))
        self.tempSoftMax = json.loads(ConfigIni.get('Training Parameters', 'SoftMax temperature'))

    def readModelTraining_params(self, fileName):
        """Load image folders, subject indexes and the training schedule."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        self.imagesFolder = ConfigIni.get('Training Images', 'imagesFolder')
        self.imagesFolder_Bottom = ConfigIni.get('Training Images', 'imagesFolder_Bottom')
        self.GroundTruthFolder = ConfigIni.get('Training Images', 'GroundTruthFolder')
        self.ROIFolder = ConfigIni.get('Training Images', 'ROIFolder')
        self.indexesForTraining = json.loads(ConfigIni.get('Training Images', 'indexesForTraining'))
        self.indexesForValidation = json.loads(ConfigIni.get('Training Images', 'indexesForValidation'))
        self.imageTypesTrain = json.loads(ConfigIni.get('Training Images', 'imageTypes'))
        self.numberOfEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of Epochs'))
        self.numberOfSubEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of SubEpochs'))
        self.numberOfSamplesSupEpoch = json.loads(ConfigIni.get('Training Parameters', 'number of samples at each SubEpoch Train'))
        self.firstEpochChangeLR = json.loads(ConfigIni.get('Training Parameters', 'First Epoch Change LR'))
        self.frequencyChangeLR = json.loads(ConfigIni.get('Training Parameters', 'Frequency Change LR'))
        self.applyPadding = json.loads(ConfigIni.get('Training Parameters', 'applyPadding'))

    def readModelTesting_params(self, fileName):
        """Load image folders and subject indexes for segmentation/testing."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        self.imagesFolder = ConfigIni.get('Segmentation Images', 'imagesFolder')
        self.imagesFolder_Bottom = ConfigIni.get('Segmentation Images', 'imagesFolder_Bottom')
        self.GroundTruthFolder = ConfigIni.get('Segmentation Images', 'GroundTruthFolder')
        self.ROIFolder = ConfigIni.get('Segmentation Images', 'ROIFolder')
        self.imageTypes = json.loads(ConfigIni.get('Segmentation Images', 'imageTypes'))
        self.indexesToSegment = json.loads(ConfigIni.get('Segmentation Images', 'indexesToSegment'))
        self.applyPadding = json.loads(ConfigIni.get('Segmentation Images', 'applyPadding'))
|
def printUsage(error_type):
    """Print an error banner followed by the command-line usage.

    error_type 1: too few arguments; any other value: a network name was
    required but not given.
    """
    if error_type == 1:
        header = ' ** ERROR!!: Few parameters used.'
    else:
        header = ' ** ERROR!!: Asked to start with an already created network but its name is not specified.'
    print(header)
    usage_lines = (
        ' ******** USAGE ******** ',
        ' --- argv 1: Name of the configIni file.',
        ' --- argv 2: Network model name',
    )
    for line in usage_lines:
        print(line)
|
def networkSegmentation(argv):
    """Command-line entry point for segmentation.

    argv[0]: name of the configIni file; argv[1]: network model name.
    Exits with a usage message when fewer than two arguments are given.
    """
    if len(argv) < 2:
        printUsage(1)
        sys.exit()
    configIniName, networkModelName = argv[0], argv[1]
    startTesting(networkModelName, configIniName)
    print(' ***************** SEGMENTATION DONE!!! ***************** ')
|
def conv(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, BN=False, ws=False, activ=nn.LeakyReLU(0.2), gainWS=2):
    """Build a conv block as nn.Sequential: conv [+ BatchNorm] [+ activation],
    optionally preceded by a WScaleLayer wrapper.

    Passing the *class* nn.PReLU (rather than an instance) as `activ`
    instantiates a fresh PReLU(num_parameters=1); any other non-None activ
    is appended as given.
    """
    conv_layer = layer(nin, nout, kernel_size, stride=stride, padding=padding, bias=bias)
    modules = []
    if ws:
        modules.append(WScaleLayer(conv_layer, gain=gainWS))
    if BN:
        modules.append(nn.BatchNorm2d(nout))
    if activ is not None:
        modules.append(activ(num_parameters=1) if activ == nn.PReLU else activ)
    # `ws` doubles as the insert position: 0 (front) normally, 1 (right after
    # the WScaleLayer) when weight scaling is enabled.
    modules.insert(ws, conv_layer)
    return nn.Sequential(*modules)
|
class ResidualConv(nn.Module):
    """Two stacked conv blocks with an additive skip connection.

    The shortcut is the identity when nin == nout, otherwise a 1x1 conv
    projection; the activation is applied after the addition. Submodule
    attribute names (convs / res / activation) are part of the state-dict
    layout and are kept unchanged.
    """

    def __init__(self, nin, nout, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
        super(ResidualConv, self).__init__()
        self.convs = nn.Sequential(
            conv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ),
            conv(nout, nout, bias=bias, BN=BN, ws=ws, activ=None),
        )
        shortcut = []
        if nin != nout:
            # Channel counts differ: project the input with a 1x1 conv.
            shortcut.append(conv(nin, nout, kernel_size=1, padding=0, bias=False, BN=BN, ws=ws, activ=None))
        self.res = nn.Sequential(*shortcut)
        post = []
        if activ is not None:
            post.append(activ(num_parameters=1) if activ == nn.PReLU else activ)
        self.activation = nn.Sequential(*post)

    def forward(self, input):
        shortcut_out = self.res(input)
        return self.activation(self.convs(input) + shortcut_out)
|
def upSampleConv_Res(nin, nout, upscale=2, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
    """Upsample by `upscale`, then apply a ResidualConv block."""
    upsample = nn.Upsample(scale_factor=upscale)
    res_block = ResidualConv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ)
    return nn.Sequential(upsample, res_block)
|
def conv_block(in_dim, out_dim, act_fn, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d -> BatchNorm2d -> activation, as a single nn.Sequential."""
    stages = [
        nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation),
        nn.BatchNorm2d(out_dim),
        act_fn,
    ]
    return nn.Sequential(*stages)
|
def conv_block_1(in_dim, out_dim):
    """1x1 conv -> BatchNorm2d -> PReLU (channel projection block)."""
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=1),
        nn.BatchNorm2d(out_dim),
        nn.PReLU(),
    )
|
def conv_block_Asym(in_dim, out_dim, kernelSize):
    """Asymmetric (spatially separable) conv block:
    (k x 1) conv -> (1 x k) conv -> BatchNorm2d -> PReLU.

    The padding is derived from kernelSize (kernelSize // 2) so spatial
    dimensions are preserved for any odd kernel size. The original code
    hard-coded a padding of 2, which is only correct for kernelSize == 5;
    behaviour is unchanged for that case.
    """
    pad = kernelSize // 2
    model = nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=(pad, 0)),
        nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=(0, pad)),
        nn.BatchNorm2d(out_dim),
        nn.PReLU(),
    )
    return model
|
def conv_block_Asym_Inception(in_dim, out_dim, kernel_size, padding, dilation=1):
    """Spatially separable conv: (k x 1) then (1 x k), each with BN + ReLU.

    Bug fix: the second ([1, k]) convolution originally used
    dilation=(dilation, 1), i.e. it dilated the height axis where its kernel
    extent is 1 (a no-op) while its padding (0, padding*dilation) assumed a
    dilated width — so for dilation != 1 the width grew instead of being
    preserved. It now uses dilation=(1, dilation), matching the horizontal
    conv in conv_block_Asym_ERFNet; behaviour is unchanged for dilation == 1.
    """
    vertical = nn.Conv2d(in_dim, out_dim, kernel_size=[kernel_size, 1],
                         padding=(padding * dilation, 0), dilation=(dilation, 1))
    horizontal = nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernel_size],
                           padding=(0, padding * dilation), dilation=(1, dilation))
    model = nn.Sequential(
        vertical, nn.BatchNorm2d(out_dim), nn.ReLU(),
        horizontal, nn.BatchNorm2d(out_dim), nn.ReLU(),
    )
    return model
|
def conv_block_Asym_Inception_WithIncreasedFeatMaps(in_dim, mid_dim, out_dim, kernel_size, padding, dilation=1):
    """Spatially separable conv with an intermediate channel count:
    (k x 1) conv to mid_dim, then (1 x k) conv to out_dim, each with BN + ReLU.

    Bug fix (same as conv_block_Asym_Inception): the second ([1, k])
    convolution originally used dilation=(dilation, 1), dilating the
    height axis where its kernel extent is 1 while padding the width as if
    it were dilated — so for dilation != 1 the width grew. It now uses
    dilation=(1, dilation); behaviour is unchanged for dilation == 1.
    """
    vertical = nn.Conv2d(in_dim, mid_dim, kernel_size=[kernel_size, 1],
                         padding=(padding * dilation, 0), dilation=(dilation, 1))
    horizontal = nn.Conv2d(mid_dim, out_dim, kernel_size=[1, kernel_size],
                           padding=(0, padding * dilation), dilation=(1, dilation))
    model = nn.Sequential(
        vertical, nn.BatchNorm2d(mid_dim), nn.ReLU(),
        horizontal, nn.BatchNorm2d(out_dim), nn.ReLU(),
    )
    return model
|
def conv_block_Asym_ERFNet(in_dim, out_dim, kernelSize, padding, drop, dilation):
    """ERFNet-style factorized block: two pairs of asymmetric convolutions
    ((k x 1) then (1 x k)), the second pair dilated, with BN after each pair
    and Dropout2d at the end.

    Bug fix: the third convolution originally took `in_dim` input channels,
    which crashes whenever in_dim != out_dim because its input is the
    out_dim-channel output of the preceding BatchNorm. It now takes
    `out_dim`; behaviour is unchanged for the usual in_dim == out_dim case.
    """
    model = nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=(padding, 0), bias=True),
        nn.ReLU(),
        nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=(0, padding), bias=True),
        nn.BatchNorm2d(out_dim, eps=0.001),
        nn.ReLU(),
        # Dilated pair: dilation along each conv's long axis.
        nn.Conv2d(out_dim, out_dim, kernel_size=[kernelSize, 1], padding=(padding * dilation, 0), bias=True, dilation=(dilation, 1)),
        nn.ReLU(),
        nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=(0, padding * dilation), bias=True, dilation=(1, dilation)),
        nn.BatchNorm2d(out_dim, eps=0.001),
        nn.Dropout2d(drop),
    )
    return model
|
def conv_block_3_3(in_dim, out_dim):
    """3x3 same-padding conv -> BatchNorm2d -> PReLU."""
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_dim),
        nn.PReLU(),
    )
|
def conv_decod_block(in_dim, out_dim, act_fn):
    """Decoder block: stride-2 transposed conv (doubles H and W)
    -> BatchNorm2d -> activation."""
    upconv = nn.ConvTranspose2d(in_dim, out_dim, kernel_size=3, stride=2, padding=1, output_padding=1)
    return nn.Sequential(upconv, nn.BatchNorm2d(out_dim), act_fn)
|
def dilation_conv_block(in_dim, out_dim, act_fn, stride_val, dil_val):
    """3x3 conv with configurable stride and dilation -> BatchNorm2d
    -> activation."""
    stages = [
        nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride_val, padding=1, dilation=dil_val),
        nn.BatchNorm2d(out_dim),
        act_fn,
    ]
    return nn.Sequential(*stages)
|
def maxpool():
    """2x2 max-pooling with stride 2 (halves each spatial dimension)."""
    return nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
|
def avrgpool05():
    """2x2 average pooling with stride 2 (downscales spatial dims by 0.5)."""
    return nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
|
def avrgpool025():
    """2x2 average pooling with stride 4 (roughly quarters each spatial
    dimension; the stride exceeds the kernel, so some inputs are skipped)."""
    return nn.AvgPool2d(kernel_size=2, stride=4, padding=0)
|
def avrgpool0125():
    """2x2 average pooling with stride 8 (roughly one-eighth of each spatial
    dimension; the stride exceeds the kernel, so some inputs are skipped)."""
    return nn.AvgPool2d(kernel_size=2, stride=8, padding=0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.