|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import pytest |
|
|
import torch |
|
|
|
|
|
from nemo.collections.asr.modules.conformer_encoder import ConformerEncoder |
|
|
|
|
|
|
|
|
class TestStochasticDepth:
    """Testing stochastic depth functionality of ``ConformerEncoder``."""

    def test_stochastic_depth_model_creation(self):
        """Testing basic model creation and the drop probs are correctly assigned."""
        n_layers = 4
        model = ConformerEncoder(feat_in=10, n_layers=n_layers, d_model=4, feat_out=8)

        # Without stochastic depth arguments, no layer is ever dropped.
        assert model.layer_drop_probs == [0.0] * n_layers

        # "linear" mode (the default): drop probability ramps up linearly after
        # start_layer, reaching drop_prob at the final layer.
        for drop_prob in [0.3, 0.5, 0.9]:
            for start_layer in [1, 3]:
                model = ConformerEncoder(
                    feat_in=10,
                    n_layers=n_layers,
                    d_model=4,
                    feat_out=8,
                    stochastic_depth_drop_prob=drop_prob,
                    stochastic_depth_start_layer=start_layer,
                )
                L = n_layers - start_layer
                assert model.layer_drop_probs == [0.0] * start_layer + [drop_prob * l / L for l in range(1, L + 1)]

        # "uniform" mode: every layer from start_layer onward gets the same drop prob.
        # NOTE: the original test relied on the leaked `start_layer` loop variable
        # from the loop above (always 3); iterate explicitly instead.
        for drop_prob in [0.3, 0.5, 0.9]:
            for start_layer in [1, 3]:
                model = ConformerEncoder(
                    feat_in=10,
                    n_layers=n_layers,
                    d_model=4,
                    feat_out=8,
                    stochastic_depth_drop_prob=drop_prob,
                    stochastic_depth_mode="uniform",
                    stochastic_depth_start_layer=start_layer,
                )
                L = n_layers - start_layer
                assert model.layer_drop_probs == [0.0] * start_layer + [drop_prob] * L

        # Drop probability outside [0, 1) must be rejected at construction time.
        for drop_prob in [-1.0, 1.0]:
            with pytest.raises(ValueError, match="stochastic_depth_drop_prob has to be in"):
                ConformerEncoder(
                    feat_in=10,
                    n_layers=n_layers,
                    d_model=4,
                    feat_out=8,
                    stochastic_depth_drop_prob=drop_prob,
                    stochastic_depth_mode="uniform",
                )

        # Unknown mode strings must be rejected.
        with pytest.raises(ValueError, match="stochastic_depth_mode has to be one of"):
            ConformerEncoder(feat_in=10, n_layers=n_layers, d_model=4, feat_out=8, stochastic_depth_mode="weird")

        # start_layer must lie within [1, n_layers].
        for start_layer in [-1, 0, 5]:
            with pytest.raises(ValueError, match="stochastic_depth_start_layer has to be in"):
                ConformerEncoder(
                    feat_in=10, n_layers=n_layers, d_model=4, feat_out=8, stochastic_depth_start_layer=start_layer,
                )

    def test_stochastic_depth_forward(self):
        """Testing that forward works and we get randomness during training, but not during eval."""
        # NOTE(review): input batch size is 1 but `length` carries 2 entries —
        # presumably tolerated by ConformerEncoder's masking; verify against its forward().
        random_input = torch.rand((1, 2, 2))
        random_length = torch.tensor([2, 2], dtype=torch.int64)

        # All regular dropout is zeroed out so that any run-to-run difference
        # can only come from stochastic depth dropping layers.
        model = ConformerEncoder(
            feat_in=2,
            n_layers=3,
            d_model=4,
            feat_out=4,
            stochastic_depth_drop_prob=0.8,
            dropout=0.0,
            dropout_pre_encoder=0.0,
            dropout_emb=0.0,
            conv_norm_type="layer_norm",
            conv_kernel_size=3,
        )

        # Train mode: layers are dropped at random, so repeated forward passes on
        # the same input should (almost surely, given drop_prob=0.8) differ.
        model.train()
        outputs = [model(audio_signal=random_input, length=random_length)[0] for _ in range(5)]
        assert any(not torch.allclose(out, outputs[0]) for out in outputs[1:])

        # Eval mode: stochastic depth is disabled, so forward must be deterministic.
        model.eval()
        outputs = [model(audio_signal=random_input, length=random_length)[0] for _ in range(5)]
        assert all(torch.allclose(out, outputs[0]) for out in outputs[1:])
|
|
|