code stringlengths 17 6.64M |
|---|
def test_modal_datamodule_setup_train(fs, mocker):
    """Fit setup builds train/val splits but no test set."""
    datamodule = kick_modal_datamodule(fs, mocker)
    datamodule.setup('fit')
    assert len(datamodule.train_dataset) == 80
    assert len(datamodule.val_dataset) == 10
    with pytest.raises(AttributeError):
        datamodule.test_dataset
|
def test_modal_datamodule_setup_val(fs, mocker):
    """Validate setup builds only the validation split."""
    datamodule = kick_modal_datamodule(fs, mocker)
    datamodule.setup('validate')
    assert len(datamodule.val_dataset) == 10
    with pytest.raises(AttributeError):
        datamodule.test_dataset
    with pytest.raises(AttributeError):
        datamodule.train_dataset
|
def test_modal_datamodule_setup_test(fs, mocker):
    """Test setup builds only the test split."""
    datamodule = kick_modal_datamodule(fs, mocker)
    datamodule.setup('test')
    assert len(datamodule.test_dataset) == 10
    with pytest.raises(AttributeError):
        datamodule.val_dataset
    with pytest.raises(AttributeError):
        datamodule.train_dataset
|
def test_modal_datamodule_train_data(fs, mocker):
    """Train dataloader yields batches shaped (batch, 1, num_samples)."""
    datamodule = kick_modal_datamodule(fs, mocker)
    datamodule.setup('fit')
    loader = datamodule.train_dataloader()
    assert isinstance(loader, DataLoader)
    # Stub out file loading so no real audio is read from disk.
    _ = mocker.patch(
        f'{TESTED_MODULE}.torchaudio.load',
        return_value=(torch.rand(1, datamodule.num_samples), datamodule.sample_rate),
    )
    (audio_batch,) = next(iter(loader))
    assert audio_batch.shape == (datamodule.batch_size, 1, datamodule.num_samples)
|
def test_modal_datamodule_audio_param_dataset_train(fs, mocker):
    """AudioWithParametersDataset yields (audio, parameters) pairs per batch."""
    datamodule = kick_modal_datamodule(
        fs,
        mocker,
        batch_size=8,
        dataset_class=AudioWithParametersDataset,
        dataset_kwargs={'parameter_key': 'features'},
    )
    datamodule.setup('fit')
    loader = datamodule.train_dataloader()
    assert isinstance(loader, DataLoader)
    # Stub audio and parameter loading; both should be hit once per item.
    mock_audio_load = mocker.patch(
        f'{TESTED_MODULE}.torchaudio.load',
        return_value=(torch.rand(1, datamodule.num_samples), datamodule.sample_rate),
    )
    mock_feature_load = mocker.patch(
        f'{TESTED_MODULE}.torch.load', return_value=torch.rand(3, 4, 10)
    )
    audio_batch, parameters = next(iter(loader))
    assert audio_batch.shape == (datamodule.batch_size, 1, datamodule.num_samples)
    assert parameters.shape == (datamodule.batch_size, 3, 4, 10)
    assert mock_audio_load.call_count == datamodule.batch_size
    assert mock_feature_load.call_count == datamodule.batch_size
|
def test_film_correctly_forwards_input():
    """FiLM preserves input shape and is differentiable w.r.t. the embedding."""
    batch_size, in_channels, seq_len, embed_size = 11, 13, 31, 7
    film = FiLM(embed_size, in_channels)
    x = torch.testing.make_tensor(
        batch_size, in_channels, seq_len, device='cpu', dtype=torch.float32
    )
    embedding = torch.testing.make_tensor(
        batch_size, embed_size, device='cpu', dtype=torch.float32, requires_grad=True
    )
    y = film(x, embedding)
    assert y.shape == (batch_size, in_channels, seq_len)
    # The embedding must actually influence the output everywhere.
    (grad,) = torch.autograd.grad(y.sum().square(), embedding)
    assert (grad.abs() > 0.0).all()
|
def test_film_can_toggle_batch_norm(mocker):
    """use_batch_norm=False must neither construct nor run BatchNorm1d."""
    init_spy = mocker.spy(torch.nn.BatchNorm1d, '__init__')
    forward_spy = mocker.spy(torch.nn.BatchNorm1d, 'forward')
    batch_size, in_channels, seq_len, embed_size = 7, 13, 37, 5
    x = torch.testing.make_tensor(
        batch_size, in_channels, seq_len, device='cpu', dtype=torch.float32
    )
    embedding = torch.testing.make_tensor(
        batch_size, embed_size, device='cpu', dtype=torch.float32
    )
    # Enabled: exactly one construction and one forward pass.
    film = FiLM(embed_size, in_channels, use_batch_norm=True)
    film(x, embedding)
    assert init_spy.call_count == 1
    assert forward_spy.call_count == 1
    # Disabled: the spy counts must not move.
    film = FiLM(embed_size, in_channels, use_batch_norm=False)
    film(x, embedding)
    assert init_spy.call_count == 1
    assert forward_spy.call_count == 1
|
def test_gated_activation_correctly_forwards_input():
    """GatedActivation halves the channel dimension of its input."""
    batch_size, out_channels, seq_len = 11, 17, 23
    x = torch.testing.make_tensor(
        batch_size, out_channels * 2, seq_len, device='cpu', dtype=torch.float32
    )
    y = GatedActivation()(x)
    assert y.shape == (batch_size, out_channels, seq_len)
|
def test_gated_activation_gates_input():
    """A hugely negative gate half drives the whole output to zero."""
    batch_size, out_channels, seq_len = 11, 17, 23
    values = torch.testing.make_tensor(
        batch_size, out_channels, seq_len, device='cpu', dtype=torch.float32
    )
    # Constant -1e8 gate: sigmoid saturates at 0, zeroing the output.
    gates = torch.testing.make_tensor(
        batch_size, out_channels, seq_len, device='cpu', dtype=torch.float32,
        low=-100000000.0, high=-100000000.0
    )
    y = GatedActivation()(torch.cat([values, gates], dim=1))
    assert y.shape == (batch_size, out_channels, seq_len)
    assert y.abs().sum() == 0.0
|
def test_tfilm_correctly_forwards_input():
    """TFiLM preserves shape when seq_len is a multiple of block_size."""
    batch_size, channels, block_size = 3, 11, 16
    seq_len = block_size * 10
    x = torch.testing.make_tensor(
        batch_size, channels, seq_len, device='cpu', dtype=torch.float32
    )
    y = TFiLM(channels=channels, block_size=block_size)(x)
    assert y.shape == (batch_size, channels, seq_len)
|
def test_dummy_parameter_encoder_can_be_instantiated():
    """Construction with a (1, 1) parameter shape succeeds."""
    assert DummyParameterEncoder((1, 1)) is not None
|
def test_dummy_parameter_encoder_can_forward():
    """Forward yields a trainable tensor of the requested shape."""
    encoder = DummyParameterEncoder((1, 1))
    output, _ = encoder(torch.rand(1, 1))
    assert output.shape == (1, 1)
    assert output.requires_grad
|
def test_modal_amp_parameters_can_forward():
    """Modal parameter tensors keep their shape through the module."""
    batch_size, num_params, num_modes, num_steps = 7, 3, 45, 400
    params = torch.rand(batch_size, num_params, num_modes, num_steps)
    # Deliberately construct with more modes than the input carries.
    model = ModalAmpParameters(num_modes + 10)
    output, _ = model(None, params)
    assert output.shape == (batch_size, num_params, num_modes, num_steps)
|
def test_soundstream_attention_encoder_forwards(mocker):
    """Encoder maps (batch, 1, time) down to (batch, output_channels)."""
    batch_size = 3
    output_channels = 12
    encoder = SoundStreamAttentionEncoder(1, 2, output_channels)
    result = encoder(torch.rand(batch_size, 1, 512))
    assert result.shape == (batch_size, output_channels)
|
@pytest.fixture
def noise_gen():
    # Shared NoiseGenerator instance with a 512-sample analysis window.
    return NoiseGenerator(window_size=512)
|
def test_noise_generator_produces_correct_output_size(noise_gen):
    """Output length matches the overlap-add formula for a 512 window."""
    hop_size, batch_size, frame_length, num_filters = 256, 16, 690, 120
    y = noise_gen(torch.rand(batch_size, frame_length, num_filters))
    expected_length = hop_size * (frame_length - 1) + hop_size * 2
    assert y.shape == (batch_size, expected_length)
|
class FakeLogger(pl.loggers.WandbLogger):
    """Test double for WandbLogger that routes ``experiment.log`` to a stub.

    Deliberately skips ``WandbLogger.__init__`` so no real wandb run is
    created during tests.
    """
    def __init__(self, stub):
        # stub: callable that records log() invocations (e.g. mocker.stub()).
        self.stub = stub
    def __getattr__(self, name: str) -> Any:
        # Masquerade as our own wandb experiment object so that
        # ``logger.experiment.log(...)`` lands in ``self.log`` below.
        if (name == 'experiment'):
            return self
        else:
            # NOTE(review): falls through to the base class for other names;
            # confirm WandbLogger actually defines __getattr__ — otherwise
            # this raises AttributeError, which is the usual fallback anyway.
            return super().__getattr__(name)
    def log(self, *args, **kwargs):
        # Forward everything to the recording stub for later assertions.
        self.stub(*args, **kwargs)
|
class FakeModule(pl.LightningModule):
    """Identity LightningModule whose ``logger`` attribute is forcibly replaced.

    ``__getattribute__`` intercepts ``logger`` lookups so the trainer-managed
    logger property is bypassed and the injected fake is returned instead.
    """
    def __init__(self, fake_logger):
        super().__init__()
        # Fake logger returned for every ``self.logger`` access.
        self.fake_logger = fake_logger
    def __getattribute__(self, __name: str) -> Any:
        if (__name == 'logger'):
            # Recursion is safe: the inner lookup asks for 'fake_logger',
            # which takes the else branch.
            return self.fake_logger
        else:
            return super().__getattribute__(__name)
    def forward(self, x, *args, **kwargs):
        # Pass-through model: output equals input, making assertions trivial.
        return x
|
def test_callback_correctly_interleaves_audio(monkeypatch, mocker):
    """Epoch-end logging interleaves target and reconstructed audio.

    Three batches of two items each pass through an identity model; the
    callback is expected to concatenate (target, output) pairs per item and
    log a single wandb Audio object under 'train/audio'.
    """
    sample_rate = 48000
    callback = LogAudioCallback(on_train=True, on_val=True, on_test=True, log_on_epoch_end=True, save_audio_sr=sample_rate)
    log_stub = mocker.stub('logger')
    logger = FakeLogger(log_stub)
    model = FakeModule(logger)
    trainer = None
    callback.setup(trainer, model, 'fit')
    FAKE_RETURN = 'fake return'
    # Intercept wandb.Audio so the logged payload can be inspected.
    audio_patch = mocker.patch('drumblender.callbacks.Audio')
    audio_patch.return_value = FAKE_RETURN
    # Expected layout per batch: target, model output (which echoes the
    # conditioning), alternating across the two batch items.
    expected_output = torch.tensor([1, (- 1), 11, (- 11), 2, (- 2), 12, (- 12), 3, (- 3), 13, (- 13)]).numpy()
    for i in range(1, 4):
        fake_conditioning = torch.tensor([[[(- i)]], [[((- 10) - i)]]])
        fake_targets = torch.tensor([[[i]], [[(10 + i)]]])
        batch = (fake_targets, fake_conditioning)
        callback.on_train_batch_start(trainer, model, batch, 0)
        # FakeModule.forward returns its input; the callback hooks capture it.
        model(fake_conditioning)
        callback.on_train_batch_end(trainer, model, 0.0, batch, 0)
    callback.on_train_epoch_end(trainer, model)
    assert (audio_patch.call_count == 1)
    (actual_output,) = audio_patch.call_args.args
    caption = audio_patch.call_args.kwargs['caption']
    actual_sample_rate = audio_patch.call_args.kwargs['sample_rate']
    assert (actual_output == expected_output).all()
    assert (caption == 'train/audio')
    assert (actual_sample_rate == sample_rate)
    log_stub.assert_called_once_with({'train/audio': FAKE_RETURN})
|
def test_clean_wandb_cache_callback_cleans_wandb_cache(monkeypatch, mocker):
    """Cache cleanup runs every second epoch with the configured size cap."""
    callback = CleanWandbCacheCallback(every_n_epochs=2, max_size_in_gb=1)

    class FakeTrainer():
        current_epoch: int = 0

    trainer = FakeTrainer()
    expected_args = ['wandb', 'artifact', 'cache', 'cleanup', '1GB']
    fake_popen = mocker.stub('subprocess.Popen')
    monkeypatch.setattr(subprocess, 'Popen', fake_popen)
    for _ in range(4):
        callback.on_train_epoch_end(trainer, None)
        trainer.current_epoch += 1
    # Four epochs at every_n_epochs=2 -> exactly two cleanup invocations.
    fake_popen.assert_has_calls([mocker.call(expected_args)] * 2)
    assert fake_popen.call_count == 2
|
def test_save_config_callback_renames_correctly(mocker, fs):
    """With a wandb logger the saved config is moved into the experiment dir."""
    def create_config_file(*args, **kwargs):
        # Simulates SaveConfigCallback.setup writing the config to log_dir.
        fs.create_file('not_experiment_dir/config.yaml')

    class FakeExperiment():
        dir = 'experiment_dir'

    class FakeLogger(pl.loggers.WandbLogger):
        def __init__(self, *args, **kwargs):
            pass

        @property
        def experiment(self):
            return FakeExperiment()

    mock_init = mocker.patch(
        'drumblender.callbacks.SaveConfigCallback.__init__', return_value=None
    )
    mock_setup = mocker.patch(
        'drumblender.callbacks.SaveConfigCallback.setup',
        side_effect=create_config_file,
    )

    class FakeTrainer():
        logger = FakeLogger()
        log_dir = 'not_experiment_dir'

    callback = SaveConfigCallbackWanb()
    callback.setup(FakeTrainer(), None, 'fit')
    assert fs.exists('experiment_dir/model-config.yaml')
    mock_init.assert_called_once()
    mock_setup.assert_called_once()
|
def test_save_config_callback_just_calls_setup_for_non_wandb_logger(mocker, fs):
    """Without a wandb logger the config file stays where setup wrote it."""
    def create_config_file(*args, **kwargs):
        fs.create_file('not_experiment_dir/config.yaml')

    class FakeNonWandbLogger():
        pass

    mock_init = mocker.patch(
        'drumblender.callbacks.SaveConfigCallback.__init__', return_value=None
    )
    mock_setup = mocker.patch(
        'drumblender.callbacks.SaveConfigCallback.setup',
        side_effect=create_config_file,
    )

    class FakeTrainer():
        logger = FakeNonWandbLogger()
        log_dir = 'not_experiment_dir'

    callback = SaveConfigCallbackWanb()
    callback.setup(FakeTrainer(), None, 'fit')
    assert fs.exists('not_experiment_dir/config.yaml')
    mock_init.assert_called_once()
    mock_setup.assert_called_once()
|
def test_first_order_difference_loss():
    """A constant signal has zero first-order difference loss."""
    loss_fn = loss.FirstOrderDifferenceLoss()
    ones = torch.ones(1, 1, 100)
    assert loss_fn(ones, ones) == 0.0
|
def test_weighted_loss_forwards():
    """Weighted sum: 2.0 * L1 + 1.0 * L1 on unit error equals 3.0."""
    loss_fn = loss.WeightedLoss(
        [torch.nn.L1Loss(), torch.nn.L1Loss()], weights=[2.0, 1.0]
    )
    assert loss_fn(torch.ones(1, 1, 100), torch.zeros(1, 1, 100)) == 3.0
|
def test_weighted_loss_forwards_no_weights():
    """Without explicit weights each loss contributes with weight 1.0."""
    loss_fn = loss.WeightedLoss([torch.nn.L1Loss(), torch.nn.L1Loss()])
    assert loss_fn(torch.ones(1, 1, 100), torch.zeros(1, 1, 100)) == 2.0
|
def test_weighted_loss_different_weights():
    """Mismatched loss/weight counts must be rejected at construction."""
    with pytest.raises(AssertionError):
        loss.WeightedLoss([torch.nn.L1Loss()], weights=[2.0, 1.0])
|
def test_weighted_loss_with_jsonargparse_config(monkeypatch):
    """WeightedLoss is constructible from a jsonargparse YAML config.

    L1 and MSE forwards are stubbed to constants, so the expected weighted
    sum is 2.0 * 1.0 + 0.1 * 20.0 == 4.0 regardless of the tensor contents.
    """
    monkeypatch.setattr(torch.nn.L1Loss, 'forward', (lambda self, x, y: 1.0))
    monkeypatch.setattr(torch.nn.MSELoss, 'forward', (lambda self, x, y: 20.0))
    expected_loss = 4.0
    # YAML config string mirroring the LightningCLI loss section.
    config = 'loss:\n class_path: drumblender.loss.WeightedLoss\n init_args:\n loss_fns: \n - class_path: torch.nn.L1Loss\n init_args:\n reduction: mean\n - class_path: torch.nn.MSELoss\n init_args:\n reduction: sum\n weights: [2.0, 0.1]'
    parser = jsonargparse.ArgumentParser()
    parser.add_argument('--loss', type=torch.nn.Module)
    args = parser.parse_string(config)
    objs = parser.instantiate_classes(args)
    shape = (13, 4, 9, 2)
    a = torch.testing.make_tensor(*shape, dtype=torch.float32, device='cpu')
    b = torch.testing.make_tensor(*shape, dtype=torch.float32, device='cpu')
    actual_loss = objs.loss(a, b)
    assert (actual_loss == expected_loss)
|
def test_drumblender_can_be_instantiated(mocker):
    """Constructor stores the given synth and loss function."""
    modal_synth = mocker.stub('modal_synth')
    loss_fn = mocker.stub('loss_fn')
    model = DrumBlender(modal_synth=modal_synth, loss_fn=loss_fn)
    assert model is not None
    assert model.modal_synth == modal_synth
    assert model.loss_fn == loss_fn
|
def test_drumblender_can_forward_modal(mocker):
    """Modal-only forward returns the synth output, called with (p, length)."""
    class FakeSynth(torch.nn.Module):
        def __init__(self, output):
            super().__init__()
            self.output = output

        def forward(self, p, length=None):
            return self.output

    expected_output = torch.rand(1, 1)
    modal_synth = FakeSynth(expected_output)
    modal_spy = mocker.spy(modal_synth, 'forward')
    batch_size, num_params, num_modes, num_steps = 7, 3, 45, 400
    x = torch.rand(batch_size, 1, 1)
    p = torch.rand(batch_size, num_params, num_modes, num_steps)
    model = DrumBlender(modal_synth=modal_synth, loss_fn=mocker.stub('loss_fn'))
    y = model(x, p)
    assert y == expected_output
    # The synth must receive the parameters and the desired output length.
    modal_spy.assert_called_once_with(p, x.size(-1))
|
def test_drumblender_forwards_all(mocker):
    """Full pipeline wiring: encoder -> per-branch autoencoders -> synths.

    Every submodule is replaced by a canned-output fake, and spies verify
    exactly what each one is called with: the shared encoder embedding, the
    modal parameters, and (for the transient branch) the modal + noise mix.
    """
    class FakeModule(torch.nn.Module):
        def __init__(self, output):
            super().__init__()
            self.output = output
        def forward(self, *args):
            return self.output
    batch_size = 7
    num_samples = 1024
    num_params = 3
    num_modes = 45
    num_steps = 400
    embedding_size = 12
    latent_size = 3
    loss_fn = mocker.stub('loss_fn')
    # Shared audio encoder producing a single embedding per item.
    expected_encoder_output = torch.rand(batch_size, embedding_size)
    encoder = FakeModule(expected_encoder_output)
    encoder_spy = mocker.spy(encoder, 'forward')
    # Each autoencoder returns an (embedding, latent) pair.
    expected_modal_encoder_output = (torch.rand(batch_size, embedding_size), torch.rand(batch_size, latent_size))
    modal_encoder = FakeModule(expected_modal_encoder_output)
    modal_encoder_spy = mocker.spy(modal_encoder, 'forward')
    expected_noise_encoder_output = (torch.rand(batch_size, embedding_size), torch.rand(batch_size, latent_size))
    noise_encoder = FakeModule(expected_noise_encoder_output)
    noise_encoder_spy = mocker.spy(noise_encoder, 'forward')
    expected_transient_encoder_output = (torch.rand(batch_size, embedding_size), torch.rand(batch_size, latent_size))
    transient_encoder = FakeModule(expected_transient_encoder_output)
    transient_encoder_spy = mocker.spy(transient_encoder, 'forward')
    # Synth fakes for each of the three branches.
    expected_modal_output = torch.rand(batch_size, 1, num_samples)
    modal_synth = FakeModule(expected_modal_output)
    modal_spy = mocker.spy(modal_synth, 'forward')
    expected_noise_output = torch.rand(batch_size, num_samples)
    noise_synth = FakeModule(expected_noise_output)
    noise_spy = mocker.spy(noise_synth, 'forward')
    expected_transient_output = torch.rand(batch_size, 1, num_samples)
    transient_synth = FakeModule(expected_transient_output)
    transient_spy = mocker.spy(transient_synth, 'forward')
    x = torch.rand(batch_size, 1, num_samples)
    p = torch.rand(batch_size, num_params, num_modes, num_steps)
    model = DrumBlender(loss_fn=loss_fn, encoder=encoder, modal_autoencoder=modal_encoder, noise_autoencoder=noise_encoder, transient_autoencoder=transient_encoder, modal_synth=modal_synth, noise_synth=noise_synth, transient_synth=transient_synth, transient_takes_noise=True)
    y = model(x, p)
    # The shared embedding feeds all three branch autoencoders.
    encoder_spy.assert_called_once_with(x)
    modal_encoder_spy.assert_called_once_with(expected_encoder_output, p)
    noise_encoder_spy.assert_called_once_with(expected_encoder_output)
    transient_encoder_spy.assert_called_once_with(expected_encoder_output)
    modal_spy.assert_called_once_with(expected_modal_encoder_output[0], x.size((- 1)))
    noise_spy.assert_called_once_with(expected_noise_encoder_output[0], x.size((- 1)))
    # transient_takes_noise=True: transient synth sees modal + noise mix.
    transient_input = (expected_modal_output + rearrange(expected_noise_output, 'b t -> b () t'))
    torch.testing.assert_close(transient_spy.call_args_list[0][0][0], transient_input)
    torch.testing.assert_close(transient_spy.call_args_list[0][0][1], expected_transient_encoder_output[0])
    assert torch.all((y == expected_transient_output))
|
def preprocess_audio_file(path_factory, in_sr=16000, out_sr=16000, in_dur=1.0, out_dur=1.0, in_stereo=False, amp=1.0):
    """Generate a sine-wave fixture, preprocess it, and return the output path.

    Args:
        path_factory: pytest tmp_path factory used to create scratch dirs.
        in_sr: sample rate of the generated input file.
        out_sr: target sample rate for preprocessing.
        in_dur: duration of the generated input in seconds.
        out_dur: target duration in seconds after preprocessing.
        in_stereo: generate a two-channel input when True.
        amp: amplitude scale applied to the generated sine wave.

    Returns:
        Path to the preprocessed output file (asserted to exist).
    """
    num_samples = int(in_dur * in_sr)
    audio = audio_utils.generate_sine_wave(440, num_samples, in_sr, in_stereo) * amp
    input_file = path_factory.mktemp('data') / 'test_input.wav'
    torchaudio.save(input_file, audio, in_sr)
    # Fixed: the original `assert (audio.shape[0] == 2) if in_stereo else 1`
    # asserted the truthy constant 1 in the mono case, checking nothing.
    # Compare against the expected channel count directly.
    assert audio.shape[0] == (2 if in_stereo else 1)
    output_file = path_factory.mktemp('data') / 'test_preprocessed.wav'
    audio_utils.preprocess_audio_file(input_file=input_file, output_file=output_file, sample_rate=out_sr, num_samples=int(out_dur * out_sr))
    assert output_file.exists()
    return output_file
|
def test_preprocess_audio_file_noresample(tmp_path_factory):
    """When input and target rates match, the rate is preserved."""
    target_sample_rate = 16000
    output_file = preprocess_audio_file(
        tmp_path_factory, in_sr=16000, out_sr=target_sample_rate
    )
    _, sample_rate = torchaudio.load(output_file)
    assert sample_rate == target_sample_rate
|
def test_preprocess_audio_file_resample(tmp_path_factory):
    """A 16 kHz input is resampled to the 48 kHz target rate."""
    target_sample_rate = 48000
    output_file = preprocess_audio_file(
        tmp_path_factory, in_sr=16000, out_sr=target_sample_rate
    )
    _, sample_rate = torchaudio.load(output_file)
    assert sample_rate == target_sample_rate
|
def test_preprocess_audio_file_resample_stereo(tmp_path_factory):
    """Stereo input comes out as a single channel at the target rate."""
    target_sample_rate = 48000
    output_file = preprocess_audio_file(
        tmp_path_factory, in_sr=16000, out_sr=target_sample_rate, in_stereo=True
    )
    waveform, sample_rate = torchaudio.load(output_file)
    assert sample_rate == target_sample_rate
    assert waveform.shape[0] == 1
|
def test_preprocess_audio_file_resample_pad(tmp_path_factory):
    """A shorter input is padded out to the requested duration."""
    target_sample_rate = 48000
    target_duration = 2.0
    output_file = preprocess_audio_file(
        tmp_path_factory,
        in_sr=16000,
        out_sr=target_sample_rate,
        in_dur=1.0,
        out_dur=target_duration,
    )
    waveform, sample_rate = torchaudio.load(output_file)
    assert sample_rate == target_sample_rate
    assert waveform.shape[1] == int(target_duration * target_sample_rate)
|
def test_preprocess_audio_file_resample_trim(tmp_path_factory):
    """A longer input is trimmed down to the requested duration."""
    target_sample_rate = 48000
    target_duration = 0.5
    output_file = preprocess_audio_file(
        tmp_path_factory,
        in_sr=16000,
        out_sr=target_sample_rate,
        in_dur=1.0,
        out_dur=target_duration,
    )
    waveform, sample_rate = torchaudio.load(output_file)
    assert sample_rate == target_sample_rate
    assert waveform.shape[1] == int(target_duration * target_sample_rate)
|
def test_preprocess_audio_file_raises_warning_on_quiet_sound(tmp_path_factory):
    # A near-silent input (amp=1e-06) must be rejected with a ValueError.
    with pytest.raises(ValueError, match='Entire wavfile below threshold level'):
        preprocess_audio_file(tmp_path_factory, amp=1e-06)
|
def test_first_non_silent_sample_returns_correct_sample():
    """Detects the first sample of the non-silent region."""
    waveform = torch.zeros(1000)
    waveform[500:] = 1.0
    onset = audio_utils.first_non_silent_sample(
        waveform, frame_size=100, hop_size=100
    )
    assert onset == 500
|
def test_first_non_silent_sample_thresholding_works_correctly():
    """Samples just above the dB threshold count as onset; just below do not."""
    threshold_db = -20.0
    amplitude = np.power(10.0, threshold_db / 20.0)
    waveform = torch.zeros(1000)
    waveform[:500] = amplitude * 0.99
    waveform[500:] = amplitude * 1.01
    onset = audio_utils.first_non_silent_sample(
        waveform, frame_size=100, hop_size=100, threshold_db=threshold_db
    )
    assert onset == 500
|
def test_first_non_silent_sample_below_threshold_returns_none():
    """An entirely sub-threshold waveform yields no onset."""
    threshold_db = -20.0
    waveform = torch.ones(1000) * np.power(10.0, threshold_db / 20.0) * 0.99
    onset = audio_utils.first_non_silent_sample(
        waveform, frame_size=100, hop_size=100, threshold_db=threshold_db
    )
    assert onset is None
|
def test_cut_start_silence_raises_error_for_incorrect_tensor_shape():
    """A 1-D waveform (no channel dimension) is rejected."""
    with pytest.raises(AssertionError):
        audio_utils.cut_start_silence(torch.zeros(1000))
|
def test_cut_start_silence_clips_tensor(mocker):
    """Leading silence is removed based on the per-channel onsets (1000 -> 900)."""
    mock = mocker.patch('drumblender.utils.audio.first_non_silent_sample')
    mock.side_effect = [200, 100]
    trimmed = audio_utils.cut_start_silence(torch.zeros(2, 1000))
    assert trimmed.shape == (2, 900)
|
def test_cut_start_silence_raises_error_for_silent_input(mocker):
    """A fully silent waveform cannot be trimmed and raises ValueError."""
    mocker.patch(
        'drumblender.utils.audio.first_non_silent_sample', return_value=None
    )
    with pytest.raises(ValueError):
        audio_utils.cut_start_silence(torch.zeros(1, 1000))
|
def test_modal_analysis_init():
    """Constructor records the given sample rate."""
    analyzer = modal_analysis.CQTModalAnalysis(48000)
    assert analyzer.sample_rate == 48000
|
def test_modal_analysis_spectrogram():
    """Spectrogram shape is (batch, bins, frames[, 2]) depending on `complex`."""
    sample_rate, hop_length, num_bins = 48000, 256, 64
    analyzer = modal_analysis.CQTModalAnalysis(
        sample_rate, hop_length=hop_length, n_bins=num_bins
    )
    waveform = torch.randn(1, 48000)
    expected_frames = waveform.shape[1] // hop_length + 1
    spec = analyzer.spectrogram(waveform)
    assert spec.shape == (1, num_bins, expected_frames, 2)
    spec = analyzer.spectrogram(waveform, complex=False)
    assert spec.shape == (1, num_bins, expected_frames)
|
def test_modal_analysis_modal_tracking():
    """The strongest tracked mode of a pure sine lands in the expected CQT bin.

    A sine is generated at the centre frequency of bin 36; after tracking,
    the track with the largest total amplitude should have a mean frequency
    of (approximately) that bin index.
    """
    sample_rate = 48000
    hop_length = 256
    num_bins = 64
    x = modal_analysis.CQTModalAnalysis(sample_rate, hop_length=hop_length, n_bins=num_bins)
    freq_bin = 36
    waveform = audio_utils.generate_sine_wave(x.frequencies()[freq_bin], 48000, sample_rate)
    spec = x.spectrogram(waveform, complex=True)
    spec = spec[0].numpy()
    (freqs, amps, phases) = x.modal_tracking(spec)
    # Pick the track with the largest summed amplitude.
    max_track = 0
    max_i = 0
    for (i, track) in enumerate(amps):
        if (sum(track) > max_track):
            max_track = sum(track)
            max_i = i
    # Frequencies are expressed in (fractional) bin indices here.
    assert np.isclose(np.mean(freqs[max_i]), 36, atol=0.025)
|
def test_modal_analysis_create_modal_tensors():
    """Constructor with analysis kwargs still records the sample rate."""
    sample_rate = 48000
    analyzer = modal_analysis.CQTModalAnalysis(
        sample_rate, hop_length=256, n_bins=64
    )
    assert analyzer.sample_rate == sample_rate
|
def test_modal_analysis_call():
    """Full analysis returns (freqs, amps, phases), one mode across all hops."""
    sample_rate = 16000
    waveform = audio_utils.generate_sine_wave(
        440, num_samples=sample_rate, sample_rate=sample_rate
    )
    analyzer = modal_analysis.CQTModalAnalysis(
        sample_rate, hop_length=256, n_bins=60, min_length=10, num_modes=1,
        threshold=-80.0,
    )
    freqs, amps, phases = analyzer(waveform)
    expected_hops = waveform.shape[1] // 256 + 1
    for tensor in (freqs, amps, phases):
        assert tensor.shape == (1, 1, expected_hops)
|
def conv(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, BN=False, ws=False, activ=nn.LeakyReLU(0.2), gainWS=2):
    # Build a conv block: [WScaleLayer?] -> conv -> [BatchNorm?] -> [activation?].
    # NOTE(review): the `activ` default is a module instance shared across
    # all calls of this factory — confirm that is intended.
    convlayer = layer(nin, nout, kernel_size, stride=stride, padding=padding, bias=bias)
    layers = []
    if ws:
        # Weight-scale layer wraps the conv; the conv itself is inserted below.
        layers.append(WScaleLayer(convlayer, gain=gainWS))
    if BN:
        layers.append(nn.BatchNorm2d(nout))
    if (activ is not None):
        if (activ == nn.PReLU):
            # PReLU passed as a class: instantiate with one shared parameter.
            layers.append(activ(num_parameters=1))
        else:
            layers.append(activ)
    # bool(ws) doubles as a list index: the conv goes at position 0 normally,
    # or at position 1 (right after the WScaleLayer) when weight scaling is on.
    layers.insert(ws, convlayer)
    return nn.Sequential(*layers)
|
class ResidualConv(nn.Module):
    """Two conv layers with a (possibly projected) residual connection.

    The second conv has no activation; the activation is applied after the
    residual addition. When ``nin != nout`` a 1x1 conv projects the skip path.
    """
    def __init__(self, nin, nout, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
        super(ResidualConv, self).__init__()
        convs = [conv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ), conv(nout, nout, bias=bias, BN=BN, ws=ws, activ=None)]
        self.convs = nn.Sequential(*convs)
        res = []
        if (nin != nout):
            # Channel mismatch: 1x1 projection on the skip path.
            res.append(conv(nin, nout, kernel_size=1, padding=0, bias=False, BN=BN, ws=ws, activ=None))
        self.res = nn.Sequential(*res)
        activation = []
        if (activ is not None):
            if (activ == nn.PReLU):
                # PReLU passed as a class: instantiate with one shared parameter.
                activation.append(activ(num_parameters=1))
            else:
                activation.append(activ)
        self.activation = nn.Sequential(*activation)
    def forward(self, input):
        out = self.convs(input)
        # An empty Sequential is the identity, so self.res(input) == input
        # whenever nin == nout.
        return self.activation((out + self.res(input)))
|
def upSampleConv_Res(nin, nout, upscale=2, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
    """Upsample spatially by ``upscale`` then apply a residual conv block."""
    upsample = nn.Upsample(scale_factor=upscale)
    residual = ResidualConv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ)
    return nn.Sequential(upsample, residual)
|
def conv_block(in_dim, out_dim, act_fn, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d -> BatchNorm2d -> activation."""
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation),
        nn.BatchNorm2d(out_dim),
        act_fn,
    )
|
def conv_block_1(in_dim, out_dim):
    """1x1 Conv2d (channel projection) -> BatchNorm2d -> PReLU."""
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=1),
        nn.BatchNorm2d(out_dim),
        nn.PReLU(),
    )
|
def conv_block_Asym(in_dim, out_dim, kernelSize):
    """Factorized (k x 1) then (1 x k) conv pair -> BatchNorm2d -> PReLU.

    Padding is hard-coded to 2, so spatial size is preserved only for
    kernelSize == 5.
    """
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([2, 0])),
        nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, 2])),
        nn.BatchNorm2d(out_dim),
        nn.PReLU(),
    )
|
def conv_block_Asym_Inception(in_dim, out_dim, kernel_size, padding, dilation=1):
    """Dilated factorized conv pair, each followed by BatchNorm2d + ReLU."""
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=[kernel_size, 1],
                  padding=tuple([padding * dilation, 0]), dilation=(dilation, 1)),
        nn.BatchNorm2d(out_dim),
        nn.ReLU(),
        nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernel_size],
                  padding=tuple([0, padding * dilation]), dilation=(dilation, 1)),
        nn.BatchNorm2d(out_dim),
        nn.ReLU(),
    )
|
def conv_block_Asym_Inception_WithIncreasedFeatMaps(in_dim, mid_dim, out_dim, kernel_size, padding, dilation=1):
    """Dilated factorized conv pair expanding channels via ``mid_dim``."""
    return nn.Sequential(
        nn.Conv2d(in_dim, mid_dim, kernel_size=[kernel_size, 1],
                  padding=tuple([padding * dilation, 0]), dilation=(dilation, 1)),
        nn.BatchNorm2d(mid_dim),
        nn.ReLU(),
        nn.Conv2d(mid_dim, out_dim, kernel_size=[1, kernel_size],
                  padding=tuple([0, padding * dilation]), dilation=(dilation, 1)),
        nn.BatchNorm2d(out_dim),
        nn.ReLU(),
    )
|
def conv_block_Asym_ERFNet(in_dim, out_dim, kernelSize, padding, drop, dilation):
    # Two factorized (k x 1)(1 x k) conv pairs — a plain pair, then a dilated
    # pair — with BN, ReLU and a trailing Dropout2d, in the style of ERFNet's
    # non-bottleneck-1D block.
    # NOTE(review): the fifth conv declares ``in_dim`` input channels but
    # receives an ``out_dim``-channel tensor from the layer before it, so this
    # block only runs when ``in_dim == out_dim`` — confirm intended usage.
    model = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([padding, 0]), bias=True), nn.ReLU(), nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, padding]), bias=True), nn.BatchNorm2d(out_dim, eps=0.001), nn.ReLU(), nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([(padding * dilation), 0]), bias=True, dilation=(dilation, 1)), nn.ReLU(), nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, (padding * dilation)]), bias=True, dilation=(1, dilation)), nn.BatchNorm2d(out_dim, eps=0.001), nn.Dropout2d(drop))
    return model
|
def conv_block_3_3(in_dim, out_dim):
    """3x3 Conv2d (padding 1, shape-preserving) -> BatchNorm2d -> PReLU."""
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_dim),
        nn.PReLU(),
    )
|
def conv_decod_block(in_dim, out_dim, act_fn):
    """Stride-2 transposed conv (doubles H and W) -> BatchNorm2d -> activation."""
    return nn.Sequential(
        nn.ConvTranspose2d(in_dim, out_dim, kernel_size=3, stride=2,
                           padding=1, output_padding=1),
        nn.BatchNorm2d(out_dim),
        act_fn,
    )
|
def dilation_conv_block(in_dim, out_dim, act_fn, stride_val, dil_val):
    """3x3 Conv2d with configurable stride/dilation -> BatchNorm2d -> activation."""
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride_val,
                  padding=1, dilation=dil_val),
        nn.BatchNorm2d(out_dim),
        act_fn,
    )
|
def maxpool():
    """2x2 max-pooling with stride 2 (halves spatial dimensions)."""
    return nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
|
def avrgpool05():
    """2x2 average pooling with stride 2 (1/2 downsampling)."""
    return nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
|
def avrgpool025():
    """Average pooling with kernel 2 and stride 4 (1/4 downsampling)."""
    return nn.AvgPool2d(kernel_size=2, stride=4, padding=0)
|
def avrgpool0125():
    """Average pooling with kernel 2 and stride 8 (1/8 downsampling)."""
    return nn.AvgPool2d(kernel_size=2, stride=8, padding=0)
|
def maxpool_1_4():
    """Max-pooling with kernel 2 and stride 4 (1/4 downsampling)."""
    return nn.MaxPool2d(kernel_size=2, stride=4, padding=0)
|
def maxpool_1_8():
    """Max-pooling with kernel 2 and stride 8 (1/8 downsampling)."""
    return nn.MaxPool2d(kernel_size=2, stride=8, padding=0)
|
def maxpool_1_16():
    """Max-pooling with kernel 2 and stride 16 (1/16 downsampling)."""
    return nn.MaxPool2d(kernel_size=2, stride=16, padding=0)
|
def maxpool_1_32():
    """Max-pooling with kernel 2 and stride 32 (1/32 downsampling).

    Fixed: the original built the pooling layer but fell through without
    returning it, so every caller received None (unlike the sibling
    ``maxpool_1_*`` factories, which all return their layer).
    """
    return nn.MaxPool2d(kernel_size=2, stride=32, padding=0)
|
def conv_block_3(in_dim, out_dim, act_fn):
    """Three stacked 3x3 convs; the last has BatchNorm but no activation."""
    return nn.Sequential(
        conv_block(in_dim, out_dim, act_fn),
        conv_block(out_dim, out_dim, act_fn),
        nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(out_dim),
    )
|
def classificationNet(D_in):
    """MLP classification head: D_in -> 400 -> 100 -> 1 with ReLU activations."""
    hidden = 400
    return torch.nn.Sequential(
        torch.nn.Linear(D_in, hidden),
        torch.nn.ReLU(),
        torch.nn.Linear(hidden, hidden // 4),
        torch.nn.ReLU(),
        torch.nn.Linear(hidden // 4, 1),
    )
|
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='=', empty=' ', tip='>', begin='[', end=']', done='[DONE]', clear=True):
    """Render a one-line terminal progress bar; call once per loop iteration.

    Args:
        iteration: current iteration (int, required).
        total: total number of iterations (int, required).
        prefix: text printed before the bar.
        suffix: text printed after the percentage.
        decimals: number of decimals shown in the percentage.
        length: character width of the bar itself.
        fill: character used for the completed portion (e.g. '=', '#').
        empty: character used for the remaining portion.
        tip: character drawn at the head of the fill (e.g. '>').
        begin: opening bar delimiter (e.g. '[').
        end: closing bar delimiter (e.g. ']').
        done: message that replaces the bar on completion.
        clear: if True, overwrite the bar with `done` when finished;
            otherwise just terminate the line.
    """
    percent = (('{0:.' + str(decimals)) + 'f}').format((100 * (iteration / float(total))))
    filledLength = int(((length * iteration) // total))
    bar = (fill * filledLength)
    if (iteration != total):
        # Not finished yet: draw the tip, then pad with `empty` to full width.
        bar = (bar + tip)
        bar = (bar + (empty * ((length - filledLength) - len(tip))))
    display = '\r{prefix}{begin}{bar}{end} {percent}%{suffix}'.format(prefix=prefix, begin=begin, bar=bar, end=end, percent=percent, suffix=suffix)
    # '\r' rewinds to the line start so successive calls overwrite in place.
    (print(display, end=''),)
    if (iteration == total):
        if clear:
            finish = '\r{prefix}{done}'.format(prefix=prefix, done=done)
            # NOTE(review): Python 2 leftover — str has no .decode on
            # Python 3, so this branch is dead there.
            if hasattr(str, 'decode'):
                finish = finish.decode('utf-8')
                display = display.decode('utf-8')
            # NOTE(review): `clear` is rebound here from a bool flag to the
            # padding string that blanks out the previous bar.
            clear = (' ' * max((len(display) - len(finish)), 0))
            print((finish + clear))
        else:
            print('')
|
def verbose(verboseLevel, requiredLevel, printFunc=print, *printArgs, **kwPrintArgs):
    """Invoke ``printFunc(*printArgs, **kwPrintArgs)`` when verbose enough.

    Nothing happens unless ``verboseLevel >= requiredLevel``. If
    ``printFunc`` is not callable it is treated as the first positional
    argument and ``print`` is used as the output function, so
    ``verbose(1, 0, "message")`` just prints the message.
    """
    if verboseLevel < requiredLevel:
        return
    args = printArgs if printArgs is not None else ('',)
    func = printFunc
    if not callable(func):
        # printFunc was actually the first thing to print.
        args = (func,) + args
        func = print
    func(*args, **kwPrintArgs)
|
def print_flush(txt=''):
    """Print ``txt`` followed by a newline and flush stdout immediately."""
    print(txt, flush=True)
|
def hide_cursor():
    """Hide the terminal cursor (Win32 console API or ANSI escape code).

    On Windows this relies on a ``_CursorInfo`` ctypes structure defined
    elsewhere in this module.
    """
    if (os.name == 'nt'):
        ci = _CursorInfo()
        # -11 is STD_OUTPUT_HANDLE in the Win32 API.
        handle = ctypes.windll.kernel32.GetStdHandle((- 11))
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif (os.name == 'posix'):
        # ANSI escape sequence DECTCEM: hide cursor.
        sys.stdout.write('\x1b[?25l')
        sys.stdout.flush()
|
def show_cursor():
    """Show the terminal cursor (Win32 console API or ANSI escape code).

    Counterpart to ``hide_cursor``; on Windows this relies on a
    ``_CursorInfo`` ctypes structure defined elsewhere in this module.
    """
    if (os.name == 'nt'):
        ci = _CursorInfo()
        # -11 is STD_OUTPUT_HANDLE in the Win32 API.
        handle = ctypes.windll.kernel32.GetStdHandle((- 11))
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif (os.name == 'posix'):
        # ANSI escape sequence DECTCEM: show cursor.
        sys.stdout.write('\x1b[?25h')
        sys.stdout.flush()
|
class REBNCONV(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU with optional dilation (U^2-Net unit)."""
    def __init__(self, in_ch=3, out_ch=3, dirate=1):
        super(REBNCONV, self).__init__()
        # 3x3 dilated conv; padding == dilation keeps the spatial size.
        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate,
                                 dilation=1 * dirate)
        self.bn_s1 = nn.BatchNorm2d(out_ch)
        self.relu_s1 = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu_s1(self.bn_s1(self.conv_s1(x)))
|
def _upsample_like(src, tar):
src = F.upsample(src, size=tar.shape[2:], mode='bilinear')
return src
|
class RSU7(nn.Module):
    """Residual U-block with 7 levels (U^2-Net building block).

    Encoder: six conv stages, the first five each followed by a 2x2 max
    pool; the bottom conv uses dilation 2 instead of further downsampling.
    Decoder: six stages, each concatenating the upsampled deeper feature
    with the same-level encoder feature. The block output is a residual sum
    with the stem feature ``hxin``.
    """
    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU7, self).__init__()
        # Stem: bring the input to out_ch so it can be added to the decoder output.
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        # Encoder path; each pool halves H and W (ceil_mode handles odd sizes).
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)
        # Bottom of the U: dilation 2 widens the receptive field without pooling.
        self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # Decoder path; inputs are channel-concatenated pairs (hence mid_ch * 2).
        self.rebnconv6d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv5d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        # Final decoder stage restores out_ch for the residual addition.
        self.rebnconv1d = REBNCONV((mid_ch * 2), out_ch, dirate=1)
    def forward(self, x):
        hx = x
        hxin = self.rebnconvin(hx)
        # ----- encoder: conv, then pool into the next level -----
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)
        hx = self.pool4(hx4)
        hx5 = self.rebnconv5(hx)
        hx = self.pool5(hx5)
        hx6 = self.rebnconv6(hx)
        # Bottom (dilated) stage.
        hx7 = self.rebnconv7(hx6)
        # ----- decoder: concat with the skip, conv, upsample to the next level -----
        hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1))
        hx6dup = _upsample_like(hx6d, hx5)
        hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)
        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
        # Residual connection with the stem feature.
        return (hx1d + hxin)
|
class RSU6(nn.Module):
    """Residual U-block of depth 6 (RSU-6).

    Encoder of five REBNCONV stages (four followed by 2x max-pooling),
    a dilated bottom convolution, and a symmetric decoder over
    concatenated skip connections; the result is added to the input
    projection (residual link).
    """
    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU6, self).__init__()
        # input projection; also serves as the residual branch
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        # encoder
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
        # dilated bottom conv (no further downsampling)
        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # decoder; doubled input channels for cat(deeper, skip)
        self.rebnconv5d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV((mid_ch * 2), out_ch, dirate=1)
    def forward(self, x):
        """Run the encoder-decoder and return decoder output + input projection."""
        hx = x
        hxin = self.rebnconvin(hx)
        # ---- encoder ----
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)
        hx = self.pool4(hx4)
        hx5 = self.rebnconv5(hx)
        hx6 = self.rebnconv6(hx5)
        # ---- decoder with skip connections ----
        hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)
        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
        # residual connection
        return (hx1d + hxin)
|
class RSU5(nn.Module):
    """Residual U-block of depth 5 (RSU-5).

    Encoder of four REBNCONV stages (three followed by 2x max-pooling),
    a dilated bottom convolution, and a symmetric decoder over
    concatenated skip connections; the result is added to the input
    projection (residual link).
    """
    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU5, self).__init__()
        # input projection; also serves as the residual branch
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        # encoder
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        # dilated bottom conv (no further downsampling)
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # decoder; doubled input channels for cat(deeper, skip)
        self.rebnconv4d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV((mid_ch * 2), out_ch, dirate=1)
    def forward(self, x):
        """Run the encoder-decoder and return decoder output + input projection."""
        hx = x
        hxin = self.rebnconvin(hx)
        # ---- encoder ----
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)
        hx5 = self.rebnconv5(hx4)
        # ---- decoder with skip connections ----
        hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
        # residual connection
        return (hx1d + hxin)
|
class RSU4(nn.Module):
    """Residual U-block of depth 4 (RSU-4).

    Three encoder stages (two followed by 2x max-pooling), a dilated
    bottom convolution, and a decoder over concatenated skip
    connections; the decoder output is added to the input projection.
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4, self).__init__()
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # decoder stages take cat(deeper, skip): doubled input channels
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        """Run the encoder-decoder and return decoder output + input projection."""
        # encoder path
        xin = self.rebnconvin(x)
        enc1 = self.rebnconv1(xin)
        enc2 = self.rebnconv2(self.pool1(enc1))
        enc3 = self.rebnconv3(self.pool2(enc2))
        bottom = self.rebnconv4(enc3)
        # decoder path over concatenated skip connections
        dec3 = self.rebnconv3d(torch.cat((bottom, enc3), 1))
        dec2 = self.rebnconv2d(torch.cat((_upsample_like(dec3, enc2), enc2), 1))
        dec1 = self.rebnconv1d(torch.cat((_upsample_like(dec2, enc1), enc1), 1))
        # residual connection
        return dec1 + xin
|
class RSU4F(nn.Module):
    """Dilation-only residual block (RSU-4F).

    No pooling or upsampling: the receptive field grows through dilation
    rates 1, 2, 4, 8 instead, so every feature map keeps the input
    resolution.  The decoder output is added to the input projection.
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4F, self).__init__()
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8)
        # decoder stages take cat(deeper, skip): doubled input channels
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        """Run the dilated encoder-decoder and add the input projection."""
        xin = self.rebnconvin(x)
        feat1 = self.rebnconv1(xin)
        feat2 = self.rebnconv2(feat1)
        feat3 = self.rebnconv3(feat2)
        feat4 = self.rebnconv4(feat3)
        dec3 = self.rebnconv3d(torch.cat((feat4, feat3), 1))
        dec2 = self.rebnconv2d(torch.cat((dec3, feat2), 1))
        dec1 = self.rebnconv1d(torch.cat((dec2, feat1), 1))
        return dec1 + xin
|
class U2NET(nn.Module):
    """U^2-Net: a two-level nested U-structure built from RSU blocks.

    The encoder stacks RSU blocks of decreasing depth (RSU7..RSU4F) with
    2x max-pooling between stages; the decoder mirrors it on
    concatenated skip connections.  Six side outputs (one per
    decoder/bottom level) are upsampled to the finest resolution and
    fused by a 1x1 conv.

    forward() returns a 7-tuple of sigmoid probability maps:
    (fused d0, side outputs d1..d6).
    """
    def __init__(self, in_ch=3, out_ch=1):
        super(U2NET, self).__init__()
        # ---- encoder ----
        self.stage1 = RSU7(in_ch, 32, 64)
        self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage2 = RSU6(64, 32, 128)
        self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage3 = RSU5(128, 64, 256)
        self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage4 = RSU4(256, 128, 512)
        self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage5 = RSU4F(512, 256, 512)
        self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage6 = RSU4F(512, 256, 512)
        # ---- decoder (inputs are cat(deeper, skip): doubled channels) ----
        self.stage5d = RSU4F(1024, 256, 512)
        self.stage4d = RSU4(1024, 128, 256)
        self.stage3d = RSU5(512, 64, 128)
        self.stage2d = RSU6(256, 32, 64)
        self.stage1d = RSU7(128, 16, 64)
        # side-output heads, one per level
        self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
        self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
        self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
        self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
        # fuses the 6 side maps into the final prediction
        self.outconv = nn.Conv2d(6, out_ch, 1)
    def forward(self, x):
        """Return (fused, side1..side6) sigmoid probability maps."""
        hx = x
        # ---- encoder ----
        hx1 = self.stage1(hx)
        hx = self.pool12(hx1)
        hx2 = self.stage2(hx)
        hx = self.pool23(hx2)
        hx3 = self.stage3(hx)
        hx = self.pool34(hx3)
        hx4 = self.stage4(hx)
        hx = self.pool45(hx4)
        hx5 = self.stage5(hx)
        hx = self.pool56(hx5)
        hx6 = self.stage6(hx)
        hx6up = _upsample_like(hx6, hx5)
        # ---- decoder with skip connections ----
        hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)
        hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))
        # ---- side outputs, all upsampled to d1's resolution ----
        d1 = self.side1(hx1d)
        d2 = self.side2(hx2d)
        d2 = _upsample_like(d2, d1)
        d3 = self.side3(hx3d)
        d3 = _upsample_like(d3, d1)
        d4 = self.side4(hx4d)
        d4 = _upsample_like(d4, d1)
        d5 = self.side5(hx5d)
        d5 = _upsample_like(d5, d1)
        d6 = self.side6(hx6)
        d6 = _upsample_like(d6, d1)
        d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))
        # torch.sigmoid replaces the deprecated F.sigmoid alias (same math)
        return (torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6))
|
class U2NETP(nn.Module):
    """U^2-Net-P: the lightweight variant of U^2-Net.

    Same nested-U topology as U2NET, but every stage uses 64 output and
    16 middle channels, giving a much smaller model.

    forward() returns a 7-tuple of sigmoid probability maps:
    (fused d0, side outputs d1..d6).
    """
    def __init__(self, in_ch=3, out_ch=1):
        super(U2NETP, self).__init__()
        # ---- encoder ----
        self.stage1 = RSU7(in_ch, 16, 64)
        self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage2 = RSU6(64, 16, 64)
        self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage3 = RSU5(64, 16, 64)
        self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage4 = RSU4(64, 16, 64)
        self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage5 = RSU4F(64, 16, 64)
        self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage6 = RSU4F(64, 16, 64)
        # ---- decoder (inputs are cat(deeper, skip): doubled channels) ----
        self.stage5d = RSU4F(128, 16, 64)
        self.stage4d = RSU4(128, 16, 64)
        self.stage3d = RSU5(128, 16, 64)
        self.stage2d = RSU6(128, 16, 64)
        self.stage1d = RSU7(128, 16, 64)
        # side-output heads, one per level
        self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side3 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side4 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side5 = nn.Conv2d(64, out_ch, 3, padding=1)
        self.side6 = nn.Conv2d(64, out_ch, 3, padding=1)
        # fuses the 6 side maps into the final prediction
        self.outconv = nn.Conv2d(6, out_ch, 1)
    def forward(self, x):
        """Return (fused, side1..side6) sigmoid probability maps."""
        hx = x
        # ---- encoder ----
        hx1 = self.stage1(hx)
        hx = self.pool12(hx1)
        hx2 = self.stage2(hx)
        hx = self.pool23(hx2)
        hx3 = self.stage3(hx)
        hx = self.pool34(hx3)
        hx4 = self.stage4(hx)
        hx = self.pool45(hx4)
        hx5 = self.stage5(hx)
        hx = self.pool56(hx5)
        hx6 = self.stage6(hx)
        hx6up = _upsample_like(hx6, hx5)
        # ---- decoder with skip connections ----
        hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)
        hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))
        # ---- side outputs, all upsampled to d1's resolution ----
        d1 = self.side1(hx1d)
        d2 = self.side2(hx2d)
        d2 = _upsample_like(d2, d1)
        d3 = self.side3(hx3d)
        d3 = _upsample_like(d3, d1)
        d4 = self.side4(hx4d)
        d4 = _upsample_like(d4, d1)
        d5 = self.side5(hx5d)
        d5 = _upsample_like(d5, d1)
        d6 = self.side6(hx6)
        d6 = _upsample_like(d6, d1)
        d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))
        # torch.sigmoid replaces the deprecated F.sigmoid alias (same math)
        return (torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6))
|
class PLN(nn.Module):
    """Plain two-convolution block: REBNCONV(in->mid) followed by
    REBNCONV(mid->out), with no pooling and no residual path."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(PLN, self).__init__()
        self.rebnconvin = REBNCONV(in_ch, mid_ch, dirate=1)
        self.rebnconvout = REBNCONV(mid_ch, out_ch, dirate=1)

    def forward(self, x):
        """Apply both conv blocks in sequence and return the result."""
        return self.rebnconvout(self.rebnconvin(x))
|
class U2NETP_short(nn.Module):
    """Shallow U2NETP variant whose stages are plain PLN blocks (no
    nested RSU structure); `levels` controls whether the deepest stage
    (stage6) is used.

    NOTE(review): `outconv` takes `levels` input channels, while the
    levels<6 path concatenates exactly 5 side maps and the levels>=6
    path 6 maps — so only levels in {5, 6} appear to work; confirm
    before using other values.
    """
    def __init__(self, in_ch=3, out_ch=1, levels=6):
        super(U2NETP_short, self).__init__()
        # encoder of plain conv blocks with 2x max-pooling between them
        self.stage1 = PLN(in_ch, 16, 64)
        self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage2 = PLN(64, 16, 64)
        self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage3 = PLN(64, 16, 64)
        self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage4 = PLN(64, 16, 64)
        self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage5 = PLN(64, 16, 64)
        self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage6 = PLN(64, 16, 64)
        # decoder stages consume cat(upsampled deeper, skip): 128 channels
        self.stage5d = PLN(128, 16, 64)
        self.stage4d = PLN(128, 16, 64)
        self.stage3d = PLN(128, 16, 64)
        self.stage2d = PLN(128, 16, 64)
        self.stage1d = PLN(128, 16, 64)
        # per-level side-output heads
        self.side1 = nn.Conv2d(64, 1, 3, padding=1)
        self.side2 = nn.Conv2d(64, 1, 3, padding=1)
        self.side3 = nn.Conv2d(64, 1, 3, padding=1)
        self.side4 = nn.Conv2d(64, 1, 3, padding=1)
        self.side5 = nn.Conv2d(64, 1, 3, padding=1)
        self.side6 = nn.Conv2d(64, 1, 3, padding=1)
        # fixed-factor upsamplers back to the input resolution
        self.upscore6 = nn.Upsample(scale_factor=32, mode='bilinear')
        self.upscore5 = nn.Upsample(scale_factor=16, mode='bilinear')
        self.upscore4 = nn.Upsample(scale_factor=8, mode='bilinear')
        self.upscore3 = nn.Upsample(scale_factor=4, mode='bilinear')
        self.upscore2 = nn.Upsample(scale_factor=2, mode='bilinear')
        # fusion conv expects one input channel per level (see class NOTE)
        self.outconv = nn.Conv2d(levels, 1, 1)
        self.levels = levels
    def forward(self, x):
        """Return 7 sigmoid maps (fused, d1..d6); d6 is None when levels < 6."""
        hx = x
        # ---- encoder ----
        hx1 = self.stage1(hx)
        hx = self.pool12(hx1)
        hx2 = self.stage2(hx)
        hx = self.pool23(hx2)
        hx3 = self.stage3(hx)
        hx = self.pool34(hx3)
        hx4 = self.stage4(hx)
        hx = self.pool45(hx4)
        hx5 = self.stage5(hx)
        # NOTE(review): this branch uses `>= 6` while the one below uses
        # `== 6`; confirm whether levels > 6 was ever intended.
        if (self.levels >= 6):
            hx = self.pool56(hx5)
            hx6 = self.stage6(hx)
            hx6up = self.upscore2(hx6)
            hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
            hx5dup = self.upscore2(hx5d)
        else:
            # deepest level skipped: upsample the encoder feature directly
            hx5dup = self.upscore2(hx5)
        # ---- decoder with skip connections ----
        hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = self.upscore2(hx4d)
        hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = self.upscore2(hx3d)
        hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = self.upscore2(hx2d)
        hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))
        # ---- side outputs, each upsampled to the input resolution ----
        d1 = self.side1(hx1d)
        d2 = self.side2(hx2d)
        d2 = self.upscore2(d2)
        d3 = self.side3(hx3d)
        d3 = self.upscore3(d3)
        d4 = self.side4(hx4d)
        d4 = self.upscore4(d4)
        # NOTE(review): side5 reads the encoder feature hx5, not the
        # decoder output hx5d — this differs from U2NETP; confirm intended.
        d5 = self.side5(hx5)
        d5 = self.upscore5(d5)
        if (self.levels == 6):
            d6 = self.side6(hx6)
            d6 = self.upscore6(d6)
            d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))
            return (F.sigmoid(d0), F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), F.sigmoid(d6))
        # levels < 6: fuse only 5 side maps and return None for d6
        d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5), 1))
        return (F.sigmoid(d0), F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), None)
|
def muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, labels_v):
    """Deep-supervision BCE loss over the fused map and the six side maps.

    Each of d0..d6 is scored against `labels_v` with the module-level
    `bce_loss`; the per-level losses are printed and summed.

    Returns
    -------
    (loss0, loss) : tuple
        loss0 is the loss of the fused output d0 (used for reporting),
        loss is the sum of all seven losses (used for backprop).
    """
    loss0 = bce_loss(d0, labels_v)
    loss1 = bce_loss(d1, labels_v)
    loss2 = bce_loss(d2, labels_v)
    loss3 = bce_loss(d3, labels_v)
    loss4 = bce_loss(d4, labels_v)
    loss5 = bce_loss(d5, labels_v)
    loss6 = bce_loss(d6, labels_v)
    loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6
    # .item() extracts the Python float from a 0-dim tensor; the legacy
    # `.data[0]` indexing raises IndexError on PyTorch >= 0.5.
    print(('l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n' % (loss0.item(), loss1.item(), loss2.item(), loss3.item(), loss4.item(), loss5.item(), loss6.item())))
    return (loss0, loss)
|
class LiviaSoftmax(LiviaNet3DConvLayer):
    """Final classification layer: a LiviaNet 3D conv layer whose output
    is bias-shifted per feature map and passed through a (temperature-
    scaled) softmax.

    Builds both a training and a testing symbolic path (Theano graphs),
    exposing p_y_given_x_train/test and y_pred_train/test.
    """
    def __init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType=0, dropoutRate=0.0, softmaxTemperature=1.0):
        # build the underlying conv layer first; the softmax head is added below
        LiviaNet3DConvLayer.__init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType, dropoutRate)
        self._numberOfOutputClasses = None
        self._bClassLayer = None
        self._softmaxTemperature = None
        # number of classes == number of conv filters in this layer
        self._numberOfOutputClasses = filterShape[0]
        self._softmaxTemperature = softmaxTemperature
        outputOfConvTrain = self.outputTrain
        outputOfConvTest = self.outputTest
        outputOfConvShapeTrain = self.outputShapeTrain
        outputOfConvShapeTest = self.outputShapeTest
        # one learnable bias per feature map (class score channel)
        b_values = np.zeros(self._numberOfFeatureMaps, dtype='float32')
        self._bClassLayer = theano.shared(value=b_values, borrow=True)
        inputToSoftmaxTrain = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTrain)
        inputToSoftmaxTest = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTest)
        # register the extra bias so it is updated during training
        self.params = (self.params + [self._bClassLayer])
        (self.p_y_given_x_train, self.y_pred_train) = applySoftMax(inputToSoftmaxTrain, outputOfConvShapeTrain, self._numberOfOutputClasses, softmaxTemperature)
        (self.p_y_given_x_test, self.y_pred_test) = applySoftMax(inputToSoftmaxTest, outputOfConvShapeTest, self._numberOfOutputClasses, softmaxTemperature)
    def negativeLogLikelihoodWeighted(self, y, weightPerClass):
        """Class-weighted negative log-likelihood of targets `y` over the
        5D training probabilities (batch, class, x, y, z)."""
        # smallest positive float32; added where probabilities are near
        # zero so the log below cannot produce -inf
        e1 = np.finfo(np.float32).tiny
        addTinyProbMatrix = (T.lt(self.p_y_given_x_train, (4 * e1)) * e1)
        # broadcast the per-class weights over batch and spatial dims
        weights = weightPerClass.dimshuffle('x', 0, 'x', 'x', 'x')
        log_p_y_given_x_train = T.log((self.p_y_given_x_train + addTinyProbMatrix))
        weighted_log_probs = (log_p_y_given_x_train * weights)
        wShape = weighted_log_probs.shape
        # advanced indexing: pick, at every voxel, the log-probability of
        # that voxel's target class y
        idx0 = T.arange(wShape[0]).dimshuffle(0, 'x', 'x', 'x')
        idx2 = T.arange(wShape[2]).dimshuffle('x', 0, 'x', 'x')
        idx3 = T.arange(wShape[3]).dimshuffle('x', 'x', 0, 'x')
        idx4 = T.arange(wShape[4]).dimshuffle('x', 'x', 'x', 0)
        return (- T.mean(weighted_log_probs[(idx0, y, idx2, idx3, idx4)]))
    def predictionProbabilities(self):
        """Return the test-time class probability tensor."""
        return self.p_y_given_x_test
|
def computeDice(autoSeg, groundTruth):
    """Per-class Dice coefficients between a segmentation and its ground truth.

    Class 0 (background) is skipped; the number of classes is inferred
    from the maximum label in `groundTruth`.

    Returns
    -------
    DiceArray : list of float
        One Dice coefficient per foreground class, each in [0, 1]
        (1 = perfect overlap, 0 = no overlap).
    """
    n_classes = int((np.max(groundTruth) + 1))
    DiceArray = []
    # range() replaces the Python-2-only xrange (NameError on Python 3)
    for c_i in range(1, n_classes):
        idx_Auto = np.where((autoSeg.flatten() == c_i))[0]
        idx_GT = np.where((groundTruth.flatten() == c_i))[0]
        # builtin `bool` replaces np.bool, removed in NumPy 1.24
        autoArray = np.zeros(autoSeg.size, dtype=bool)
        autoArray[idx_Auto] = 1
        gtArray = np.zeros(autoSeg.size, dtype=bool)
        gtArray[idx_GT] = 1
        dsc = dice(autoArray, gtArray)
        DiceArray.append(dsc)
    return DiceArray
|
def dice(im1, im2):
    """Dice coefficient between two binary masks.

    Parameters
    ----------
    im1, im2 : array-like
        Masks of equal size; non-boolean inputs are converted to bool.

    Returns
    -------
    float in [0, 1]: 1 = perfect overlap, 0 = no overlap.  Two empty
    masks are defined to have Dice 1.0.

    Raises
    ------
    ValueError
        If the two masks differ in size.
    """
    # builtin `bool` replaces np.bool, removed in NumPy 1.24
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if (im1.size != im2.size):
        raise ValueError('Size mismatch between input arrays!!!')
    im_sum = (im1.sum() + im2.sum())
    if (im_sum == 0):
        # both masks empty: treat as perfect agreement
        return 1.0
    intersection = np.logical_and(im1, im2)
    return ((2.0 * intersection.sum()) / im_sum)
|
def applyActivationFunction_Sigmoid(inputData):
    """Element-wise logistic sigmoid.

    `inputData` is a 5D tensor shaped (batchSize, featureMaps,
    convolvedImageShape[0], [1], [2]).
    """
    return T.nnet.sigmoid(inputData)
|
def applyActivationFunction_Tanh(inputData):
    """Element-wise hyperbolic tangent.

    `inputData` is a 5D tensor shaped (batchSize, featureMaps,
    convolvedImageShape[0], [1], [2]).
    """
    return T.tanh(inputData)
|
def applyActivationFunction_ReLU_v1(inputData):
    """ReLU as an element-wise maximum with zero.

    `inputData` is a 5D tensor shaped (batchSize, featureMaps,
    convolvedImageShape[0], [1], [2]).
    """
    zero = 0
    return T.maximum(inputData, zero)
|
def applyActivationFunction_ReLU_v2(inputData):
    """ReLU via T.switch: zero where the input is negative, identity elsewhere."""
    is_negative = (inputData < 0.0)
    return T.switch(is_negative, 0.0, inputData)
|
def applyActivationFunction_ReLU_v3(inputData):
    """ReLU written arithmetically as (x + |x|) / 2."""
    doubled_positive_part = inputData + abs(inputData)
    return doubled_positive_part / 2.0
|
def applyActivationFunction_ReLU_v4(inputData):
    """ReLU via the sign trick: 0.5 * (sgn(x) + 1) * x."""
    gate = T.sgn(inputData) + 1
    return gate * inputData * 0.5
|
def applyActivationFunction_LeakyReLU(inputData, leakiness):
    """Leaky rectifier: identity for x >= 0, `leakiness` * x for x < 0.

    leakiness : float, usually in [0, 1].  0 gives the standard ReLU,
    1 a linear activation, values in between a leaky rectifier.
    Computed as pos*x + neg*|x| with pos = (1+leakiness)/2 and
    neg = (1-leakiness)/2.

    Reference: Maas et al. (2013), "Rectifier Nonlinearities Improve
    Neural Network Acoustic Models".

    `inputData` is a 5D tensor (batchSize, featMaps, xDim, yDim, zDim).
    """
    pos_scale = (0.5 * (1 + leakiness))
    neg_scale = (0.5 * (1 - leakiness))
    return (pos_scale * inputData) + (neg_scale * abs(inputData))
|
def applyActivationFunction_PReLU(inputData, PreluActivations):
    """Parametric ReLU: f(x) = x for x >= 0, alpha * x for x < 0,
    where alpha is a learned per-feature-map parameter.

    `inputData` is a 5D tensor (batchSize, featMaps, xDim, yDim, zDim);
    `PreluActivations` holds one alpha per feature map and is broadcast
    over the remaining dimensions.
    """
    alpha = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    positive_part = T.maximum(0, inputData)
    negative_part = (alpha * (inputData - abs(inputData))) * 0.5
    return positive_part + negative_part
|
def applyActivationFunction_PReLU_v2(inputData, PreluActivations):
    """Parametric ReLU, arithmetic form: (x+|x|)/2 + alpha*(x-|x|)/2.

    `inputData` is a 5D tensor (batchSize, featMaps,
    convolvedImageShape[0], [1], [2]); alpha is per feature map.
    """
    alpha = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    positive_part = (inputData + abs(inputData)) / 2.0
    negative_part = alpha * ((inputData - abs(inputData)) / 2.0)
    return positive_part + negative_part
|
def applyActivationFunction_PReLU_v3(inputData, PreluActivations):
    """Parametric ReLU, scale form: pos*x + neg*|x| with
    pos = (1+alpha)/2 and neg = (1-alpha)/2 (alpha per feature map).

    `inputData` is a 5D tensor (batchSize, featMaps,
    convolvedImageShape[0], [1], [2]).
    """
    alpha = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    pos_scale = (0.5 * (1 + alpha))
    neg_scale = (0.5 * (1 - alpha))
    return (pos_scale * inputData) + (neg_scale * abs(inputData))
|
# NOTE: the text below is dataset-viewer boilerplate that was accidentally
# appended to this file; it is not Python and has been commented out to keep
# the module importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.