|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os |
|
|
import tempfile |
|
|
|
|
|
import pytest |
|
|
import torch |
|
|
|
|
|
from nemo.core.classes.module import NeuralModule |
|
|
|
|
|
|
|
|
class TempModule(NeuralModule):
    """Minimal NeuralModule fixture: two bias-free 10x10 linear layers (200 weights total)."""

    def __init__(self):
        super().__init__()
        # Two identically-shaped layers so freeze/unfreeze tests can target
        # one sub-module independently of the other.
        for attr in ('layer1', 'layer2'):
            setattr(self, attr, torch.nn.Linear(10, 10, bias=False))
|
|
|
|
|
|
|
|
class TestNeuralModule:
    """Unit tests for the freeze/unfreeze/as_frozen API of NeuralModule."""

    @pytest.mark.unit
    def test_num_weights(self):
        """Weight count equals the parameters of both 10x10 bias-free layers."""
        assert TempModule().num_weights == 200

    @pytest.mark.unit
    def test_freeze(self):
        """freeze() disables gradients on every parameter."""
        mod = TempModule()
        mod.freeze()
        assert all(not param.requires_grad for param in mod.parameters())

    @pytest.mark.unit
    def test_unfreeze(self):
        """unfreeze() restores gradients after a full freeze()."""
        mod = TempModule()
        mod.freeze()
        mod.unfreeze()
        assert all(param.requires_grad for param in mod.parameters())

    @pytest.mark.unit
    def test_as_frozen(self):
        """as_frozen() freezes only for the duration of the context manager."""
        mod = TempModule()

        # Gradients are enabled before entering the context.
        assert all(param.requires_grad for param in mod.parameters())

        with mod.as_frozen():
            # Inside the context every parameter is frozen.
            assert all(not param.requires_grad for param in mod.parameters())

        # On exit the original requires_grad state is restored.
        assert all(param.requires_grad for param in mod.parameters())

    @pytest.mark.unit
    def test_partial_unfreeze(self):
        """unfreeze(partial=True) restores the pre-freeze grad state, keeping
        layers that were already frozen before freeze() in their frozen state."""
        mod = TempModule()

        # Manually freeze layer1 before calling freeze().
        for param in mod.layer1.parameters():
            param.requires_grad = False

        mod.freeze()

        # layer1 stays frozen after the full freeze.
        assert all(not param.requires_grad for param in mod.layer1.parameters())

        # freeze() must have recorded the prior grad state of both weights.
        assert mod._frozen_grad_map is not None
        assert len(mod._frozen_grad_map) == 2
        assert mod._frozen_grad_map['layer1.weight'] is False

        mod.unfreeze(partial=True)

        # Partial unfreeze honors the recorded state (layer1 remains frozen)
        # and discards the bookkeeping map afterwards.
        assert mod.layer1.weight.requires_grad is False
        assert not hasattr(mod, '_frozen_grad_map')
|
|
|