# (extraction artifact — original markdown table header preserved as comments)
# code stringlengths 17 6.64M |
# |---|
def densenet121(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-121 (`"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_); minimum input size 29x29.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Densenet-121 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet121, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet161(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-161 (`"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_); minimum input size 29x29.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Densenet-161 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet161, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet169(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-169 (`"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_); minimum input size 29x29.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Densenet-169 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet169, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet201(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-201 (`"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_); minimum input size 29x29.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Densenet-201 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet201, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_mnist(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet CNN for MNIST, converted via `create_le_net_biomodel`.

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting LeNet CNN MNIST to {MODE_STRING} mode')
    return create_le_net_biomodel(le_net.le_net_mnist, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_cifar(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet CNN for CIFAR, converted via `create_le_net_biomodel`.

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting LeNet CNN CIFAR to {MODE_STRING} mode')
    return create_le_net_biomodel(le_net.le_net_cifar, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_5(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 0.5 (`"MnasNet: Platform-Aware Neural
    Architecture Search for Mobile" <https://arxiv.org/pdf/1807.11626.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting MNASNet 0.5 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet0_5, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_75(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 0.75 (`"MnasNet: Platform-Aware Neural
    Architecture Search for Mobile" <https://arxiv.org/pdf/1807.11626.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting MNASNet 0.75 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet0_75, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_0(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 1.0 (`"MnasNet: Platform-Aware Neural
    Architecture Search for Mobile" <https://arxiv.org/pdf/1807.11626.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting MNASNet 1.0 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet1_0, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_3(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 1.3 (`"MnasNet: Platform-Aware Neural
    Architecture Search for Mobile" <https://arxiv.org/pdf/1807.11626.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting MNASNet 1.3 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet1_3, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet18(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-18 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-18 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet18, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet20(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-20 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-20 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet20, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet32(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-32 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-32 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet32, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet34(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-34 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-34 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet34, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet44(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-44 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-44 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet44, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet50(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-50 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-50 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet50, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet56(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-56 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-56 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet56, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet101(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-101 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-101 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet101, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet110(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-110 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-110 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet110, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet152(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-152 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-152 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet152, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet1202(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-1202 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-1202 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet1202, MODE, layer_config, pretrained, progress, num_classes)
|
def resnext50_32x4d(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNeXt-50 32x4d (`"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNext-50 32x4d to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnext50_32x4d, MODE, layer_config, pretrained, progress, num_classes)
|
def resnext101_32x8d(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNeXt-101 32x8d (`"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNext-101 32x8d to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnext101_32x8d, MODE, layer_config, pretrained, progress, num_classes)
|
def wide_resnet50_2(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """Wide ResNet-50-2 (`"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_).

    Same as ResNet except the bottleneck channel count is doubled in every
    block; the outer 1x1 convolutions keep their width (e.g. the last block
    of ResNet-50 is 2048-512-2048, of Wide ResNet-50-2 it is 2048-1024-2048).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Wide ResNet-50-2 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.wide_resnet50_2, MODE, layer_config, pretrained, progress, num_classes)
|
def wide_resnet101_2(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """Wide ResNet-101-2 (`"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_).

    Same as ResNet except the bottleneck channel count is doubled in every
    block; the outer 1x1 convolutions keep their width (e.g. the last block
    of ResNet-50 is 2048-512-2048, of Wide ResNet-50-2 it is 2048-1024-2048).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Wide ResNet-101-2 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.wide_resnet101_2, MODE, layer_config, pretrained, progress, num_classes)
|
class LeNetMNIST(nn.Module):
    """LeNet-5 style CNN producing 10 class logits from 1-channel images.

    Layout: three 5x5 convolutions (6 -> 16 -> 120 channels), each followed
    by ReLU, with 2x2 max-pooling after the first two stages, then two
    fully connected layers (120 -> 84 -> 10).

    NOTE(review): with no padding, the conv/pool chain reaches a 1x1 map
    (so `fc1`'s 120 input features line up) only for 32x32 inputs —
    presumably MNIST images are padded upstream; confirm against the
    data pipeline.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1)
        self.relu3 = nn.ReLU()
        # Classifier head.
        self.fc1 = nn.Linear(in_features=120, out_features=84)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        """Return class logits of shape (batch, 10)."""
        feats = self.pool1(self.relu1(self.conv1(x)))
        feats = self.pool2(self.relu2(self.conv2(feats)))
        feats = self.relu3(self.conv3(feats))
        flat = torch.flatten(feats, 1)
        return self.fc2(self.relu4(self.fc1(flat)))
|
def le_net_mnist(pretrained: bool=False, progress: bool=True, num_classes: int=10):
    """Build a LeNetMNIST model.

    Args:
        pretrained (bool): Ignored — no pre-trained weights are loaded here.
        progress (bool): Ignored — nothing is downloaded here.
        num_classes (int): Output dimension of the last linear layer
            (default 10).

    Returns:
        A freshly initialized LeNetMNIST instance.
    """
    model = LeNetMNIST()
    # Bug fix: `num_classes` used to be silently ignored (the head was
    # hard-coded to 10 outputs). Keep the default architecture for 10
    # classes and swap only the final layer otherwise.
    if num_classes != 10:
        model.fc2 = nn.Linear(in_features=84, out_features=num_classes)
    return model
|
class LeNetCIFAR(nn.Module):
    """Small LeNet-style CNN producing 10 class logits from 3-channel images.

    Layout: three 3x3 convolutions (32 -> 64 -> 128 channels) with ReLU,
    2x2 max-pooling after the first two stages, a global average pool over
    the remaining spatial extent, then two fully connected layers
    (128 -> 256 -> 10).
    """

    def __init__(self):
        super().__init__()
        # Feature extractor.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1)
        self.relu3 = nn.ReLU()
        # Classifier head.
        self.fc1 = nn.Linear(in_features=128, out_features=256)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(in_features=256, out_features=10)

    def forward(self, x):
        """Return class logits of shape (batch, 10)."""
        feats = self.pool1(self.relu1(self.conv1(x)))
        feats = self.pool2(self.relu2(self.conv2(feats)))
        feats = self.relu3(self.conv3(feats))
        # Collapse the remaining spatial extent to 1x1, then flatten.
        pooled = F.avg_pool2d(feats, feats.size()[3])
        flat = pooled.view(pooled.size(0), -1)
        return self.fc2(self.relu4(self.fc1(flat)))
|
def le_net_cifar(pretrained: bool=False, progress: bool=True, num_classes: int=10):
    """Build a LeNetCIFAR model.

    Args:
        pretrained (bool): Ignored — no pre-trained weights are loaded here.
        progress (bool): Ignored — nothing is downloaded here.
        num_classes (int): Output dimension of the last linear layer
            (default 10).

    Returns:
        A freshly initialized LeNetCIFAR instance.
    """
    model = LeNetCIFAR()
    # Bug fix: `num_classes` used to be silently ignored (the head was
    # hard-coded to 10 outputs). Keep the default architecture for 10
    # classes and swap only the final layer otherwise.
    if num_classes != 10:
        model.fc2 = nn.Linear(in_features=256, out_features=num_classes)
    return model
|
def alexnet(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> AlexNet:
    """AlexNet (`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_);
    minimum input size 63x63.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting AlexNet to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.alexnet, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet121(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-121 (`"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_); minimum input size 29x29.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Densenet-121 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet121, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet161(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-161 (`"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_); minimum input size 29x29.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Densenet-161 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet161, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet169(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-169 (`"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_); minimum input size 29x29.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Densenet-169 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet169, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet201(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-201 (`"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_); minimum input size 29x29.

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting Densenet-201 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.densenet201, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_mnist(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet CNN for MNIST, converted via `create_le_net_biomodel`.

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting LeNet CNN MNIST to {MODE_STRING} mode')
    return create_le_net_biomodel(le_net.le_net_mnist, MODE, layer_config, pretrained, progress, num_classes)
|
def le_net_cifar(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """LeNet CNN for CIFAR, converted via `create_le_net_biomodel`.

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting LeNet CNN CIFAR to {MODE_STRING} mode')
    return create_le_net_biomodel(le_net.le_net_cifar, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_5(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 0.5 (`"MnasNet: Platform-Aware Neural
    Architecture Search for Mobile" <https://arxiv.org/pdf/1807.11626.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting MNASNet 0.5 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet0_5, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet0_75(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 0.75 (`"MnasNet: Platform-Aware Neural
    Architecture Search for Mobile" <https://arxiv.org/pdf/1807.11626.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting MNASNet 0.75 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet0_75, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_0(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 1.0 (`"MnasNet: Platform-Aware Neural
    Architecture Search for Mobile" <https://arxiv.org/pdf/1807.11626.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting MNASNet 1.0 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet1_0, MODE, layer_config, pretrained, progress, num_classes)
|
def mnasnet1_3(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet:
    """MNASNet, depth multiplier 1.3 (`"MnasNet: Platform-Aware Neural
    Architecture Search for Mobile" <https://arxiv.org/pdf/1807.11626.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting MNASNet 1.3 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.mnasnet1_3, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet18(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-18 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-18 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet18, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet20(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-20 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-20 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet20, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet32(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-32 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-32 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet32, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet34(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-34 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load ImageNet pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-34 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet34, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet44(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-44 (CIFAR-sized variant; `"Deep Residual Learning for Image
    Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, load pre-trained weights.
        progress (bool): If True, show a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible method layer configuration.
    """
    print(f'Converting ResNet-44 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet44, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet50(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-50 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_, converted to the module-level
    biologically plausible training mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting ResNet-50 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet50, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet56(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-56 (small CIFAR-style variant) from `"Deep Residual Learning for
    Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_, converted to
    the module-level biologically plausible training mode.

    Args:
        pretrained (bool): If True, returns a pre-trained model.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting ResNet-56 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet56, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet101(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-101 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_, converted to the module-level
    biologically plausible training mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting ResNet-101 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet101, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet110(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-110 (small CIFAR-style variant) from `"Deep Residual Learning for
    Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_, converted to
    the module-level biologically plausible training mode.

    Args:
        pretrained (bool): If True, returns a pre-trained model.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting ResNet-110 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet110, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet152(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNet-152 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_, converted to the module-level
    biologically plausible training mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting ResNet-152 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnet152, MODE, layer_config, pretrained, progress, num_classes)
|
def resnet1202(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """ResNet-1202 (small CIFAR-style variant) from `"Deep Residual Learning
    for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_, converted
    to the module-level biologically plausible training mode.

    Args:
        pretrained (bool): If True, returns a pre-trained model.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting ResNet-1202 to {MODE_STRING} mode')
    return create_torchvision_biomodel(small_resnet.resnet1202, MODE, layer_config, pretrained, progress, num_classes)
|
def resnext50_32x4d(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNeXt-50 32x4d from `"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_, converted to the
    module-level biologically plausible training mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting ResNext-50 32x4d to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnext50_32x4d, MODE, layer_config, pretrained, progress, num_classes)
|
def resnext101_32x8d(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """ResNeXt-101 32x8d from `"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_, converted to the
    module-level biologically plausible training mode.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting ResNext-101 32x8d to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.resnext101_32x8d, MODE, layer_config, pretrained, progress, num_classes)
|
def wide_resnet50_2(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """Wide ResNet-50-2 from `"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_, converted to the module-level
    biologically plausible training mode.

    Identical to ResNet-50 except the bottleneck channel count is doubled in
    every block (outer 1x1 convolutions unchanged), e.g. the last block is
    2048-1024-2048 instead of 2048-512-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting Wide ResNet-50-2 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.wide_resnet50_2, MODE, layer_config, pretrained, progress, num_classes)
|
def wide_resnet101_2(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config: dict=None) -> ResNet:
    """Wide ResNet-101-2 from `"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_, converted to the module-level
    biologically plausible training mode.

    Identical to ResNet-101 except the bottleneck channel count is doubled in
    every block (outer 1x1 convolutions unchanged).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        progress (bool): If True, displays a download progress bar on stderr.
        num_classes (int): Output dimension of the last linear layer.
        layer_config (dict): Custom biologically plausible layer configuration.
    """
    print(f'Converting Wide ResNet-101-2 to {MODE_STRING} mode')
    return create_torchvision_biomodel(models.wide_resnet101_2, MODE, layer_config, pretrained, progress, num_classes)
|
def create_torchvision_biomodel(model_architecture, mode, layer_config: dict=None, pretrained: bool=False, progress: bool=True, num_classes: int=1000) -> BioModule:
    """Instantiate a torchvision architecture and wrap it in a BioModule.

    When `pretrained` is True the model must first be built with the 1000-class
    ImageNet head (pretrained weights only exist for that shape); a custom
    `num_classes` is then applied by swapping the classification head.

    Fix: the original always replaced `model.fc`, but the DenseNet wrappers in
    this file also route through this helper and DenseNet exposes its head as
    `classifier`, so a pretrained DenseNet with a custom `num_classes` raised
    AttributeError. The head attribute is now detected.

    Args:
        model_architecture: torchvision (or small_resnet) constructor.
        mode (str): biologically plausible training mode.
        layer_config (dict): custom per-layer configuration, or None.
        pretrained (bool): load pretrained ImageNet weights.
        progress (bool): show a download progress bar.
        num_classes (int): output dimension of the classification head.

    Returns:
        BioModule: the converted, wrapped model.
    """
    if not pretrained:
        copy_weights = False
        model = model_architecture(pretrained, progress, num_classes=num_classes)
    else:
        copy_weights = True
        model = model_architecture(pretrained, progress, num_classes=1000)
        if num_classes != 1000:
            # ResNet-style models name their head `fc`; DenseNet-style models
            # name it `classifier`.
            if hasattr(model, 'fc'):
                model.fc = nn.Linear(model.fc.in_features, num_classes)
            elif hasattr(model, 'classifier'):
                model.classifier = nn.Linear(model.classifier.in_features, num_classes)
            else:
                raise AttributeError('Unsupported architecture: no `fc` or `classifier` head found')
    return BioModule(model, mode=mode, copy_weights=copy_weights, layer_config=layer_config, output_dim=num_classes)
|
def create_le_net_biomodel(model_architecture, mode, layer_config: dict=None, pretrained: bool=False, progress: bool=True, num_classes: int=10) -> BioModule:
    """Build a LeNet-style model and wrap it in a BioModule.

    No pretrained weights exist for these small nets, so the wrapper never
    copies weights into the converted layers.
    """
    network = model_architecture(pretrained, progress, num_classes=num_classes)
    return BioModule(network, mode=mode, copy_weights=False, layer_config=layer_config, output_dim=num_classes)
|
def apply_xavier_init(module):
    """Xavier-uniform init for Linear/Conv2d weights; biases are zeroed.

    Intended for use with `nn.Module.apply`; any other layer type is left
    untouched.
    """
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(module.weight)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
|
class BioModule(nn.Module):
    """Wraps an arbitrary torch module and rewires its layers for a
    biologically plausible training mode (e.g. 'fa', 'dfa')."""

    def __init__(self, module, mode='fa', copy_weights=True, layer_config=None, output_dim=None):
        super(BioModule, self).__init__()
        self.module = module
        self.mode = mode
        self.output_dim = output_dim
        self.copy_weights = copy_weights
        # Fall back to a minimal configuration that only names the mode.
        self.layer_config = layer_config if layer_config is not None else {'type': mode}
        if self.mode == 'dfa' and self.output_dim is None:
            raise ValueError('Model `output_dim` is required for Direct Feedback Alignment (dfa) mode')
        converter = ModuleConverter(mode=self.mode)
        self.module = converter.convert(self.module, self.copy_weights, self.layer_config, self.output_dim)

    def forward(self, x, targets=None, loss_function=None):
        """Forward pass; in 'dfa' training mode also computes the loss gradient
        w.r.t. the output and attaches it to every submodule."""
        output = self.module(x)
        if self.mode == 'dfa' and self.module.training:
            if targets is None:
                raise ValueError('Targets missing for Direct Feedback Alignment mode')
            if loss_function is None:
                raise ValueError('You need to introduce your `loss_function` for Direct Feedback Alignment mode')
            loss_gradient = grad(loss_function(output, targets), output, retain_graph=True)[0]
            for submodule in self.module.modules():
                submodule.loss_gradient = loss_gradient
        return output
|
class ModuleConverter():
    """Replaces a module's layers in place with their biologically plausible
    counterparts for a given mode, and reports a conversion summary."""
    def __init__(self, mode='fa'):
        self.mode = mode
    def convert(self, module, copy_weights=True, layer_config=None, output_dim=None):
        """Convert `module` in place and print a per-layer-type summary
        comparing original counts to converted counts. Returns `module`."""
        # Count layers before conversion so the summary can detect partially
        # converted layer types.
        layer_counts = self.count_layers(module)
        self.replaced_layers_counts = defaultdict((lambda : 0))
        self._replace_layers_recursive(module, self.mode, copy_weights, layer_config, output_dim, self.replaced_layers_counts)
        print('Module has been converted to {} mode:\n'.format(self.mode))
        if (layer_config is not None):
            print('The layer configuration was: ', layer_config)
        for (layer, count) in self.replaced_layers_counts.items():
            if (layer_counts[layer] != count):
                print('- There were originally {} {} layers and {} were converted.'.format(layer_counts[layer], layer, count))
            else:
                print('- All the {} {} layers were converted successfully.'.format(count, layer))
        return module
    def _replace_layers_recursive(self, module, mode, copy_weights, layer_config, output_dim, replaced_layers):
        """Depth-first replacement: convert direct children first, then recurse
        into the (possibly freshly replaced) child modules."""
        for module_name in module._modules.keys():
            layer = getattr(module, module_name)
            # convert_layer returns None for layer types it does not handle.
            new_layer = convert_layer(layer, mode, copy_weights, layer_config, output_dim)
            if (new_layer is not None):
                replaced_layers[str(type(layer))] += 1
                setattr(module, module_name, new_layer)
        for (name, child_module) in module.named_children():
            self._replace_layers_recursive(child_module, mode, copy_weights, layer_config, output_dim, replaced_layers)
    @staticmethod
    def count_layers(module):
        """Count every submodule (including `module` itself) keyed by its
        stringified type."""
        layer_counts = defaultdict((lambda : 0))
        for layer in module.modules():
            layer_counts[str(type(layer))] += 1
        return layer_counts
|
def train(model, mode, loss_function, optimizer, train_dataloader, device, epoch, multi_gpu, top_k=5, display_iterations=500):
    """Run one training epoch; returns (average top-1 accuracy, average loss).

    Args:
        model: network to train (possibly a BioModule wrapper).
        mode (str): training mode; 'dfa' makes the forward pass consume the
            targets and criterion, 'weight_mirroring' triggers a mirroring
            step after each optimizer update.
        loss_function: criterion mapping (outputs, targets) -> scalar loss.
        optimizer: optimizer stepping the model parameters.
        train_dataloader: iterable of (inputs, targets) batches.
        device: device batches are moved to.
        epoch (int): current epoch index (used for progress display only).
        multi_gpu (bool): whether `model` is wrapped so mirroring must go
            through `model.module`.
        top_k (int): k for the secondary accuracy metric.
        display_iterations (int): print progress every this many batches.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    topk = AverageMeter(('Acc@' + str(top_k)), ':6.2f')
    progress = ProgressMeter(len(train_dataloader), [batch_time, data_time, losses, top1, topk], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    end = time.time()
    for (idx_batch, (inputs, targets)) in enumerate(train_dataloader):
        data_time.update((time.time() - end))
        (inputs, targets) = (inputs.to(device), targets.to(device))
        if (mode == 'dfa'):
            # DFA models compute and distribute the loss gradient inside
            # forward, so they need the targets and criterion up front.
            outputs = model(inputs, targets, loss_function)
        else:
            outputs = model(inputs)
        # NOTE(review): squeeze removes ALL size-1 dims — a final batch of
        # size 1 would lose its batch dimension too; confirm the loaders
        # never yield a batch of one.
        outputs = torch.squeeze(outputs)
        loss = loss_function(outputs, targets)
        (acc1, acck) = accuracy(outputs, targets, topk=(1, top_k))
        losses.update(loss.item(), inputs.size(0))
        top1.update(acc1[0], inputs.size(0))
        topk.update(acck[0], inputs.size(0))
        model.zero_grad()
        loss.backward()
        optimizer.step()
        if (mode == 'weight_mirroring'):
            # Refine the feedback weights with random inputs after every
            # update; go through .module when the model is wrapped.
            if multi_gpu:
                model.module.mirror_weights(torch.randn(inputs.size()).to(device), growth_control=True)
            else:
                model.mirror_weights(torch.randn(inputs.size()).to(device), growth_control=True)
        batch_time.update((time.time() - end))
        end = time.time()
        if ((idx_batch % display_iterations) == 0):
            progress.display(idx_batch)
    return (top1.avg, losses.avg)
|
def test(model, loss_function, test_dataloader, device, top_k=5):
    """Evaluate `model` over `test_dataloader` without gradients; returns
    (average top-1 accuracy, average loss) and prints a summary line."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    topk = AverageMeter(('Acc@' + str(top_k)), ':6.2f')
    model.eval()
    with torch.no_grad():
        end = time.time()
        for (idx_batch, (data, target)) in enumerate(test_dataloader):
            (inputs, targets) = (data.to(device), target.to(device))
            outputs = model(inputs)
            # NOTE(review): squeeze drops every size-1 dim, including the
            # batch dim for a batch of one; confirm loader batch sizes.
            outputs = torch.squeeze(outputs)
            batch_time.update((time.time() - end))
            end = time.time()
            loss = loss_function(outputs, targets)
            (acc1, acc5) = accuracy(outputs, targets, topk=(1, top_k))
            losses.update(loss.item(), inputs.size(0))
            top1.update(acc1[0], inputs.size(0))
            topk.update(acc5[0], inputs.size(0))
    print(' * Acc@1 {top1.avg:.3f} Acc@{top_k} {topk.avg:.3f}'.format(top1=top1, top_k=top_k, topk=topk))
    return (top1.avg, losses.avg)
|
def adjust_learning_rate(optimizer, epoch, args):
    """Apply a step decay: base LR (`args.lr`) divided by 10 every 30 epochs,
    written into every parameter group of `optimizer`."""
    decayed_lr = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
|
def compute_angles_module(module):
    """Collect alignment angles from every layer that exposes one.

    Walks the module tree depth-first (a deque used as a stack; children are
    pushed in reverse so they pop in registration order). For each layer whose
    instance dict contains 'alignment', calls `layer.compute_alignment()` and
    stores the scalar under '<attr_name>_<occurrence_index>'.

    Returns:
        OrderedDict: layer key -> alignment angle (float), in traversal order.
    """
    queue = deque()
    layers_alignment = OrderedDict()
    seen_keys = defaultdict((lambda : 0))
    for module_keys in module._modules.keys():
        queue.append((module, module_keys))
    while (len(queue) > 0):
        (module, module_key) = queue.popleft()
        layer = getattr(module, module_key)
        if ('alignment' in layer.__dict__):
            angle = layer.compute_alignment()
            # Disambiguate repeated attribute names (e.g. several 'conv'
            # children across different blocks).
            key_name = ((module_key + '_') + str(seen_keys[module_key]))
            seen_keys[module_key] += 1
            layers_alignment[key_name] = angle.item()
        if (len(layer._modules.keys()) > 0):
            # Push children to the front (reversed) so the walk stays
            # depth-first and preserves definition order.
            for key in list(layer._modules.keys())[::(- 1)]:
                queue.appendleft((layer, key))
    return layers_alignment
|
def compute_weight_ratio_module(module, mode):
    """Collect per-layer weight norms/ratios using the same depth-first walk
    as `compute_angles_module`.

    For 'backpropagation' mode the plain weight norm of Conv2d/Linear layers
    is recorded; otherwise layers exposing a 'weight_ratio' attribute report
    via `layer.compute_weight_ratio()`.

    Returns:
        OrderedDict: layer key ('<attr_name>_<occurrence>') -> scalar value.
    """
    queue = deque()
    weight_diff = OrderedDict()
    seen_keys = defaultdict((lambda : 0))
    for module_keys in module._modules.keys():
        queue.append((module, module_keys))
    while (len(queue) > 0):
        (module, module_key) = queue.popleft()
        layer = getattr(module, module_key)
        weight = None
        if ((mode == 'backpropagation') and isinstance(layer, (torch.nn.Conv2d, torch.nn.Linear))):
            with torch.no_grad():
                weight = torch.linalg.norm(layer.weight)
        elif ('weight_ratio' in layer.__dict__):
            weight = layer.compute_weight_ratio()
        if (weight is not None):
            # Disambiguate repeated attribute names across blocks.
            key_name = ((module_key + '_') + str(seen_keys[module_key]))
            seen_keys[module_key] += 1
            weight_diff[key_name] = weight.item()
        if (len(layer._modules.keys()) > 0):
            # Depth-first: children go to the front in reverse order.
            for key in list(layer._modules.keys())[::(- 1)]:
                queue.appendleft((layer, key))
    return weight_diff
|
class AverageMeter(object):
    """Tracks the latest value, running sum, count and mean of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. with fmt=':.2f': 'Loss 0.12 (0.34)'  -> latest (mean)
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
|
class ProgressMeter(object):
    """Formats and prints a batch-progress line built from AverageMeter
    summaries, e.g. 'Epoch: [0][  7/100]\\tTime ...\\tLoss ...'."""

    def __init__(self, num_batches, meters, prefix=''):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print `prefix[batch/total]` followed by every meter, tab-separated."""
        entries = [(self.prefix + self.batch_fmtstr.format(batch))]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the current-batch number to the width of the total count, e.g.
        # num_batches=100 -> '[{:3d}/100]'. (The original computed
        # `num_batches // 1`, a no-op for the int batch counts used here.)
        num_digits = len(str(num_batches))
        fmt = ('{:' + str(num_digits)) + 'd}'
        return (((('[' + fmt) + '/') + fmt.format(num_batches)) + ']')
|
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for each k in `topk`.

    Args:
        output: (N, C) logits/scores.
        target: (N,) ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        list of 1-element tensors, one per k, each holding the accuracy %.
    """
    with torch.no_grad():
        k_max = max(topk)
        n = target.size(0)
        _, predictions = output.topk(k_max, 1, True, True)
        predictions = predictions.t()
        # hits[k_rank, sample] is True where the k-th ranked prediction
        # matches the target.
        hits = predictions.eq(target.view(1, -1).expand_as(predictions))
        results = []
        for k in topk:
            num_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(num_correct.mul_(100.0 / n))
        return results
|
class Trainer():
    """Orchestrates the full training run: per-epoch train/validate, TensorBoard
    logging, optional layer-alignment / weight-ratio recording, LR scheduling,
    and best/latest model checkpointing under `output_dir`."""
    def __init__(self, model, mode, loss_function, optimizer, lr_scheduler, train_dataloader, val_dataloader, device, epochs, output_dir, metrics_config, multi_gpu=False):
        self.model = model
        self.mode = mode
        self.output_dir = output_dir
        self.logs_dir = os.path.join(output_dir, 'logs')
        self.loss_function = loss_function
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.device = device
        self.epochs = epochs
        self.multi_gpu = multi_gpu
        # metrics_config keys consumed here: display_iterations,
        # layer_alignment (bool), weight_ratio (bool), top_k.
        self.display_iterations = metrics_config['display_iterations']
        self.record_layer_alignment = metrics_config['layer_alignment']
        self.record_weight_ratio = metrics_config['weight_ratio']
        self.top_k = metrics_config['top_k']
        self.writer = SummaryWriter(self.logs_dir)
        # Modes for which per-layer alignment angles are defined.
        self.layer_alignment_modes = ['fa', 'usf', 'frsf', 'brsf']
    def write_layer_alignment(self, epoch):
        """Log per-layer alignment angles for `epoch`, if enabled and supported
        by the mode; errors are deliberately swallowed (best-effort metric)."""
        if self.record_layer_alignment:
            if (self.mode in self.layer_alignment_modes):
                try:
                    layers_alignment = compute_angles_module(self.model)
                    self.writer.add_scalars('layer_alignment/train', layers_alignment, epoch)
                except BaseException:
                    pass
            else:
                print('Layer alignment is not implemented for {}'.format(self.mode))
    def write_weight_ratio(self, epoch):
        """Log per-layer weight ratios for `epoch`, if enabled; best-effort
        like `write_layer_alignment`."""
        if self.record_weight_ratio:
            try:
                weight_difference = compute_weight_ratio_module(self.model, self.mode)
                self.writer.add_scalars('weight_difference/train', weight_difference, epoch)
            except BaseException:
                pass
    def run(self):
        """Train for `self.epochs` epochs; each epoch logs metrics, trains,
        validates, checkpoints the best/latest model, steps the scheduler and
        persists the running best accuracy."""
        self.best_acc = 0.0
        for epoch in range(self.epochs):
            self.write_layer_alignment(epoch)
            self.write_weight_ratio(epoch)
            t = time.time()
            (acc, loss) = train(model=self.model, mode=self.mode, loss_function=self.loss_function, optimizer=self.optimizer, train_dataloader=self.train_dataloader, device=self.device, multi_gpu=self.multi_gpu, epoch=epoch, top_k=self.top_k, display_iterations=self.display_iterations)
            self.writer.add_scalar('accuracy/train', acc, epoch)
            self.writer.add_scalar('loss/train', loss, epoch)
            (acc, loss) = test(model=self.model, loss_function=self.loss_function, test_dataloader=self.val_dataloader, device=self.device, top_k=self.top_k)
            self.writer.add_scalar('accuracy/test', acc, epoch)
            self.writer.add_scalar('loss/test', loss, epoch)
            if (acc > self.best_acc):
                self.best_acc = max(acc, self.best_acc)
                print('New best accuracy reached: {} \nSaving best accuracy model...'.format(self.best_acc))
                # Save the unwrapped model when running under a multi-GPU
                # wrapper so it can be reloaded standalone.
                if self.multi_gpu:
                    torch.save(self.model.module, os.path.join(self.output_dir, 'model_best_acc.pth'))
                else:
                    torch.save(self.model, os.path.join(self.output_dir, 'model_best_acc.pth'))
            # Always keep a rolling "latest" checkpoint as well.
            torch.save(self.model, os.path.join(self.output_dir, 'latest_model.pth'))
            total_time = (time.time() - t)
            self.lr_scheduler.step()
            self.writer.add_scalar('time/train', total_time, epoch)
            with open(os.path.join(self.output_dir, 'best_acc.txt'), 'w') as f:
                f.write(str(self.best_acc))
        # Record the final post-training metrics once more.
        self.write_layer_alignment(epoch)
        self.write_weight_ratio(epoch)
|
def read_yaml(yaml_path):
    """Parse the YAML file at `yaml_path` and return its contents.

    NOTE(review): `yaml.Loader` can construct arbitrary Python objects from
    YAML tags; fine for trusted local config files, but use `yaml.safe_load`
    if the path can ever come from untrusted input.
    """
    with open(yaml_path, 'r') as f:
        yaml_file = yaml.load(f, Loader=yaml.Loader)
    return yaml_file
|
def mkdir(path):
    """Create directory `path` (including parents) if it does not exist.

    `exist_ok=True` closes the check-then-create race in the original (two
    concurrent callers could both pass the `exists` check and one would then
    crash in `makedirs`). The outer existence check is kept so an existing
    non-directory path is still silently ignored, as before.
    """
    if not os.path.exists(path):
        return os.makedirs(path, exist_ok=True)
|
def mkdirs(paths):
    """Create every directory in `paths`.

    Args:
        paths: a single path string, or a list/tuple of path strings.

    The original guard was `isinstance(paths, list) and not isinstance(paths,
    str)` — the second test is dead (a list is never a str) and a tuple of
    paths fell through to `mkdir(paths)` and failed; tuples are now accepted.
    """
    if isinstance(paths, (list, tuple)):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)
|
def path_exists(path):
    """Return True if `path` exists on disk; raise ValueError otherwise."""
    if not os.path.exists(path):
        raise ValueError('Path provided does not exist.')
    return True
|
def read_schema(schema_name):
    """Load and parse `<repo>/schemas/<schema_name>.json`, resolved relative
    to this module's directory."""
    schema_path = os.path.normpath(
        os.path.join(os.path.dirname(__file__), '..', 'schemas', schema_name + '.json'))
    with open(schema_path) as schema_file:
        return json.load(schema_file)
|
def validate_config(instance, schema_name, defaults=True):
    """Validate `instance` against `<repo>/schemas/<schema_name>.json`.

    When `defaults` is True, the Draft-7 validator is extended so that missing
    properties are filled in from the schema's `default` values (mutating
    `instance` in place) before validation.

    NOTE(review): jsonschema raises `ValidationError` (not `ValueError`) for
    an invalid instance, so these `except ValueError` clauses likely never
    fire and validation errors propagate as-is — confirm intended behavior.
    """
    with open(os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'schemas', (schema_name + '.json')))) as schema:
        if defaults:
            default_validator = extend_schema_with_default(Draft7Validator)
            try:
                default_validator(json.load(schema)).validate(instance)
            except ValueError:
                raise ValueError('Error when validating the default schema.')
        else:
            try:
                jsonschema.validate(instance, json.load(schema))
            except ValueError:
                raise ValueError('Error when validating the schema.')
|
def extend_schema_with_default(validator_class):
    """Return a jsonschema validator class that also applies `default` values.

    The returned class behaves like `validator_class` but, while validating
    `properties`, inserts each subschema's `default` into the instance when
    the property is absent (the standard jsonschema defaults recipe).
    """
    validate_properties = validator_class.VALIDATORS['properties']
    def set_defaults(validator, properties, instance, schema):
        # Fill in defaults first, then delegate to the stock `properties` rule.
        for (property_, subschema) in properties.items():
            if (('default' in subschema) and (not isinstance(instance, list))):
                instance.setdefault(property_, subschema['default'])
        for error in validate_properties(validator, properties, instance, schema):
            (yield error)
    return validators.extend(validator_class, {'properties': set_defaults})
|
class Model(nn.Module):
    """Small 3-conv CNN for 3-channel inputs with a 10-way linear head."""

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3)
        self.relu3 = nn.ReLU()
        self.fc = nn.Linear(in_features=128, out_features=10)

    def forward(self, x):
        features = self.pool1(self.relu1(self.conv1(x)))
        features = self.pool2(self.relu2(self.conv2(features)))
        features = self.relu3(self.conv3(features))
        # Global average pool over the remaining spatial extent, then flatten
        # to (N, 128) for the linear head.
        features = F.avg_pool2d(features, features.size()[3])
        flat = features.view(features.size(0), -1)
        return self.fc(flat)
|
def tflog2pandas(path: str) -> pd.DataFrame:
    """Convert a single TensorBoard event file into a long-format DataFrame.

    Args:
        path: path to the TensorBoard event file.

    Returns:
        pd.DataFrame with columns 'metric', 'value', 'step' — one row per
        recorded scalar; empty if the file is missing/corrupt.
    """
    # 'scalars': 0 means "load every scalar event" (no size cap).
    DEFAULT_SIZE_GUIDANCE = {'compressedHistograms': 1, 'images': 1, 'scalars': 0, 'histograms': 1}
    runlog_data = pd.DataFrame({'metric': [], 'value': [], 'step': []})
    try:
        event_acc = EventAccumulator(path, DEFAULT_SIZE_GUIDANCE)
        event_acc.Reload()
        tags = event_acc.Tags()['scalars']
        for tag in tags:
            event_list = event_acc.Scalars(tag)
            values = list(map((lambda x: x.value), event_list))
            step = list(map((lambda x: x.step), event_list))
            r = {'metric': ([tag] * len(step)), 'value': values, 'step': step}
            r = pd.DataFrame(r)
            runlog_data = pd.concat([runlog_data, r])
    except Exception:
        # Best-effort: report and return whatever was parsed so far.
        print('Event file possibly corrupt: {}'.format(path))
        traceback.print_exc()
    return runlog_data
|
def sorting_function(x1, x2):
    """Comparator for '<name>_<index>' keys: ascending numeric index first,
    ties broken by lexicographic name (ties never compare equal: <= maps to
    -1, matching the original behavior)."""
    left_parts = x1.split('_')
    right_parts = x2.split('_')
    left_idx = int(left_parts[1])
    right_idx = int(right_parts[1])
    if left_idx != right_idx:
        return -1 if left_idx < right_idx else 1
    return -1 if left_parts[0] <= right_parts[0] else 1
|
def get_layer_alignment(dir_logs, net='resnet'):
    """Read per-layer alignment curves from TensorBoard logs under `dir_logs`.

    Scans subfolders whose name contains 'layer_alignment', parses the first
    event file of each into a value list, sorts layer keys by depth (numeric
    suffix, then name) and moves the fully connected head key(s) to the end
    so plots read input -> output.

    Returns:
        dict: layer key -> list of scalar alignment values over training.
    """
    layers_paths = [folder for folder in os.listdir(dir_logs)]
    event_paths = []  # NOTE(review): appears unused
    layers_alignment = {}
    for l_p in layers_paths:
        if ('layer_alignment' in l_p):
            log_path = glob.glob(os.path.join(dir_logs, l_p, 'event*'))
            if (len(log_path) > 0):
                df = tflog2pandas(log_path[0])
                layers_alignment[l_p.replace('layer_alignment_train_', '')] = df['value'].tolist()
    keys = list(layers_alignment.keys())
    keys = sorted(keys, key=cmp_to_key(sorting_function))
    if (net == 'resnet'):
        # Move the classifier head to the end of the ordering.
        keys.remove('fc_0')
        keys.append('fc_0')
    elif (net == 'lenet'):
        # LeNet has two linear head layers; push both to the end, in order.
        keys.remove('fc1_0')
        keys.remove('fc2_0')
        keys.append('fc1_0')
        keys.append('fc2_0')
    return {key: layers_alignment[key] for key in keys}
|
def get_layer_weights(dir_logs, net='resnet', normalization=None):
    """Read per-layer weight-difference curves from TensorBoard logs.

    Same layout and ordering rules as `get_layer_alignment`, but reads the
    'weight_difference' subfolders. If `normalization` is given, each layer's
    curve is scaled by the matching per-layer factor (matched by key order).

    Returns:
        dict: layer key -> list (or np.ndarray when normalized) of values.
    """
    layers_paths = [folder for folder in os.listdir(dir_logs)]
    event_paths = []  # NOTE(review): appears unused
    layers_alignment = {}
    for l_p in layers_paths:
        if ('weight_difference' in l_p):
            log_path = glob.glob(os.path.join(dir_logs, l_p, 'event*'))
            if (len(log_path) > 0):
                df = tflog2pandas(log_path[0])
                layers_alignment[l_p.replace('weight_difference_train_', '')] = df['value'].tolist()
    keys = list(layers_alignment.keys())
    keys = sorted(keys, key=cmp_to_key(sorting_function))
    if (net == 'resnet'):
        # Move the classifier head to the end of the ordering.
        keys.remove('fc_0')
        keys.append('fc_0')
    elif (net == 'lenet'):
        keys.remove('fc1_0')
        keys.remove('fc2_0')
        keys.append('fc1_0')
        keys.append('fc2_0')
    layer_weights = {key: layers_alignment[key] for key in keys}
    if normalization:
        # Scale each curve by its per-layer factor, iterating in key order.
        count = 0
        for (key, value) in layer_weights.items():
            layer_weights[key] = (np.array(value) * normalization[count])
            count += 1
    return layer_weights
|
def mkdir(path):
    """Create directory `path` (including parents) if it does not exist.

    `exist_ok=True` closes the check-then-create race of the original
    check-then-`makedirs` pattern; the outer existence check is kept so an
    existing non-directory path is still silently ignored, as before.

    NOTE(review): this duplicates an identical `mkdir` defined earlier in
    this file — consider deduplicating.
    """
    if not os.path.exists(path):
        return os.makedirs(path, exist_ok=True)
|
def plot_multiple_lists(ydata, xdata, x_axis_name, y_axis_name, title, save_dir, figname, cmap='winter'):
    """Plot each named series in `ydata` against `xdata`, one color per series
    taken along `cmap`, with a 'Layer Depth' colorbar; saves the figure to
    `<save_dir>/<figname>.pdf` and shows it.

    Args:
        ydata (dict): series name -> list of y values.
        xdata: shared x values for every series.
        x_axis_name/y_axis_name/title (str): axis labels and plot title.
        save_dir (str): output directory (created if missing).
        figname (str): output file name without extension.
        cmap (str): matplotlib colormap name.
    """
    n = len(ydata)
    cmap_ = plt.cm.get_cmap(cmap)
    colors = iter(cmap_(np.linspace(0, 1, n)))
    colors_cmap = cmap_(np.arange(cmap_.N))  # NOTE(review): appears unused
    # Dummy contourf spanning the series count: provides a mappable for the
    # colorbar; the figure is cleared immediately afterwards.
    Z = [[0, 0], [0, 0]]
    levels = range(0, n, 1)
    CS3 = plt.contourf(Z, levels, cmap=cmap_)
    plt.clf()
    mkdir(save_dir)
    with plt.style.context('ggplot'):
        for (name, data) in ydata.items():
            plt.plot(xdata, data, color=next(colors))
        plt.title(title, pad=10)
        plt.xlabel(x_axis_name)
        plt.ylabel(y_axis_name)
        cbar = plt.colorbar(CS3)
        cbar.set_label('Layer Depth', labelpad=10)
        plt.tight_layout()
        plt.savefig('{}/{}.pdf'.format(save_dir, figname), dpi=200)
        plt.show()
|
class FGSM(Attack):
    "\n FGSM in the paper 'Explaining and harnessing adversarial examples'\n [https://arxiv.org/abs/1412.6572]\n Distance Measure : Linf\n Arguments:\n model (nn.Module): model to attack.\n eps (float): maximum perturbation. (Default: 0.007)\n Shape:\n - images: :math:`(N, C, H, W)` where `N = number of batches`, `C = number of channels`, `H = height` and `W = width`. It must have a range [0, 1].\n - labels: :math:`(N)` where each value :math:`y_i` is :math:`0 \\leq y_i \\leq` `number of labels`.\n - output: :math:`(N, C, H, W)`.\n Examples::\n >>> attack = torchattacks.FGSM(model, eps=0.007)\n >>> adv_images = attack(images, labels)\n "
    def __init__(self, model, eps=0.35, mode='bp'):
        super().__init__('FGSM', model)
        self.eps = eps
        self._supported_mode = ['default', 'targeted']
        # NOTE(review): forward() compares against 'DFA' (uppercase) while the
        # default here is lowercase 'bp' — a caller passing 'dfa' silently
        # takes the plain-autograd branch; confirm the expected casing.
        self.mode = mode
    def forward(self, images, labels):
        """Build FGSM adversarial examples: clip(x + eps * sign(grad_x loss))."""
        images = images.clone().detach().to(self.device)
        labels = labels.clone().detach().to(self.device)
        loss_function = nn.CrossEntropyLoss()
        images.requires_grad = True
        outputs = self.model(images)
        cost = loss_function(outputs, labels)
        if (self.mode == 'DFA'):
            # DFA layers read the loss gradient w.r.t. the outputs from a
            # `loss_gradient` attribute, so attach it to every submodule before
            # backward() can populate images.grad.
            # assumes self.model is a Sequential with the BioModule at index 1
            # — TODO confirm against the callers.
            self.model.zero_grad()
            loss_gradient = torch.autograd.grad(cost, outputs, retain_graph=True)[0]
            for layer in self.model[1].module.modules():
                layer.loss_gradient = loss_gradient
            cost.backward()
            grad = images.grad
        else:
            grad = torch.autograd.grad(cost, images, retain_graph=False, create_graph=False)[0]
        self.grad = grad
        adv_images = (images + (self.eps * grad.sign()))
        # Keep adversarial pixels in the valid [0, 1] image range.
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        return adv_images
|
class PGD(Attack):
    "\n PGD in the paper 'Towards Deep Learning Models Resistant to Adversarial Attacks'\n [https://arxiv.org/abs/1706.06083]\n Distance Measure : Linf\n Arguments:\n model (nn.Module): model to attack.\n eps (float): maximum perturbation. (Default: 0.3)\n alpha (float): step size. (Default: 2/255)\n steps (int): number of steps. (Default: 40)\n random_start (bool): using random initialization of delta. (Default: True)\n Shape:\n - images: :math:`(N, C, H, W)` where `N = number of batches`, `C = number of channels`, `H = height` and `W = width`. It must have a range [0, 1].\n - labels: :math:`(N)` where each value :math:`y_i` is :math:`0 \\leq y_i \\leq` `number of labels`.\n - output: :math:`(N, C, H, W)`.\n Examples::\n >>> attack = torchattacks.PGD(model, eps=8/255, alpha=1/255, steps=40, random_start=True)\n >>> adv_images = attack(images, labels)\n "
    def __init__(self, model, eps=0.35, mode='bp', alpha=(2 / 255), steps=40, random_start=True):
        super().__init__('PGD', model)
        self.eps = eps
        self.alpha = alpha
        self.steps = steps
        self.random_start = random_start
        self._supported_mode = ['default', 'targeted']
        # NOTE(review): forward() compares against 'DFA' (uppercase) while the
        # default here is lowercase 'bp'; confirm the expected casing.
        self.mode = mode
    def forward(self, images, labels):
        """Iterated FGSM: `steps` ascent steps of size `alpha`, projected back
        into the eps-ball around the clean images and into [0, 1]."""
        images = images.clone().detach().to(self.device)
        labels = labels.clone().detach().to(self.device)
        loss = nn.CrossEntropyLoss()
        adv_images = images.clone().detach()
        if self.random_start:
            # Start from a uniform random point inside the eps-ball.
            adv_images = (adv_images + torch.empty_like(adv_images).uniform_((- self.eps), self.eps))
            adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        for _ in range(self.steps):
            adv_images.requires_grad = True
            outputs = self.model(adv_images)
            cost = loss(outputs, labels)
            if (self.mode == 'DFA'):
                # DFA layers read the loss gradient w.r.t. the outputs from a
                # `loss_gradient` attribute; attach it to every submodule
                # before backward() populates adv_images.grad.
                # assumes self.model is a Sequential with the BioModule at
                # index 1 — TODO confirm against the callers.
                self.model.zero_grad()
                loss_gradient = torch.autograd.grad(cost, outputs, retain_graph=True)[0]
                for layer in self.model[1].module.modules():
                    layer.loss_gradient = loss_gradient
                cost.backward()
                grad = adv_images.grad
            else:
                grad = torch.autograd.grad(cost, adv_images, retain_graph=False, create_graph=False)[0]
            self.grad = grad
            # One ascent step, then project into the eps-ball and [0, 1].
            adv_images = (adv_images.detach() + (self.alpha * grad.sign()))
            delta = torch.clamp((adv_images - images), min=(- self.eps), max=self.eps)
            adv_images = torch.clamp((images + delta), min=0, max=1).detach()
        return adv_images
|
class TPGD(Attack):
    "\n PGD based on KL-Divergence loss in the paper 'Theoretically Principled Trade-off between Robustness and Accuracy'\n [https://arxiv.org/abs/1901.08573]\n Distance Measure : Linf\n Arguments:\n model (nn.Module): model to attack.\n eps (float): strength of the attack or maximum perturbation. (Default: 8/255)\n alpha (float): step size. (Default: 2/255)\n steps (int): number of steps. (Default: 7)\n Shape:\n - images: :math:`(N, C, H, W)` where `N = number of batches`, `C = number of channels`, `H = height` and `W = width`. It must have a range [0, 1].\n - output: :math:`(N, C, H, W)`.\n Examples::\n >>> attack = torchattacks.TPGD(model, eps=8/255, alpha=2/255, steps=7)\n >>> adv_images = attack(images)\n "
    def __init__(self, model, mode='bp', eps=(8 / 255), alpha=(2 / 255), steps=7):
        super().__init__('TPGD', model)
        self.eps = eps
        self.alpha = alpha
        self.steps = steps
        self._supported_mode = ['default']
        # NOTE(review): forward() compares against 'DFA' (uppercase) while the
        # default here is lowercase 'bp'; confirm the expected casing.
        self.mode = mode
    def forward(self, images, labels=None):
        """Craft TRADES-style adversarial examples by maximizing the KL
        divergence between adversarial and clean softmax outputs (the `labels`
        argument is ignored; soft labels come from the clean forward pass)."""
        images = images.clone().detach().to(self.device)
        # Soft labels: the detached clean model output is the KL target.
        logit_ori = self.model(images).detach()
        labels = F.softmax(logit_ori, dim=1)
        # Start from a small random perturbation of the clean images.
        adv_images = (images + (0.001 * torch.randn_like(images)))
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
        loss = nn.KLDivLoss(reduction='sum')
        for _ in range(self.steps):
            adv_images.requires_grad = True
            logit_adv = self.model(adv_images)
            outputs = F.log_softmax(logit_adv, dim=1)
            cost = loss(outputs, labels)
            if (self.mode == 'DFA'):
                # DFA layers read the loss gradient w.r.t. the outputs from a
                # `loss_gradient` attribute; attach it to every submodule
                # before backward() populates adv_images.grad.
                # assumes self.model is a Sequential with the BioModule at
                # index 1 — TODO confirm against the callers.
                self.model.zero_grad()
                loss_gradient = torch.autograd.grad(cost, outputs, retain_graph=True)[0]
                for layer in self.model[1].module.modules():
                    layer.loss_gradient = loss_gradient
                cost.backward()
                grad = adv_images.grad
            else:
                grad = torch.autograd.grad(cost, adv_images, retain_graph=False, create_graph=False)[0]
            # One ascent step, then project into the eps-ball and [0, 1].
            adv_images = (adv_images.detach() + (self.alpha * grad.sign()))
            delta = torch.clamp((adv_images - images), min=(- self.eps), max=self.eps)
            adv_images = torch.clamp((images + delta), min=0, max=1).detach()
        return adv_images
|
@pytest.fixture(scope='session')
def config_bp_path():
    """Absolute path to the backpropagation test configuration file."""
    relative = os.path.join('tests', 'fixtures', 'config_files', 'config_bp.yaml')
    return os.path.abspath(relative)
|
@pytest.fixture(scope='session')
def config_usf_reproducible_path():
    """Absolute path to the fixed-seed USF test configuration file."""
    relative = os.path.join('tests', 'fixtures', 'config_files', 'config_usf_reproducible.yaml')
    return os.path.abspath(relative)
|
def test_benchmark(config_bp_path):
    """End-to-end smoke test: run one benchmark and verify every expected
    output artifact was written to the run directory."""
    Benchmark(config_bp_path).run()
    produced = os.listdir('tests/tmp/mnist/le_net/backpropagation_test/')
    expected = ['best_acc.txt', 'config.yaml', 'latest_model.pth', 'results.csv', 'results.json', 'model_best_acc.pth', 'logs']
    for artifact in expected:
        assert (artifact in produced)
|
def test_benchmark_command_line_reproducibility_cpu(config_usf_reproducible_path):
    """Run the benchmark CLI twice with a fixed-seed config and assert the
    two results files are identical.

    Args:
        config_usf_reproducible_path: fixture path to a config with fixed seeds.
    """
    cmd = ['python', 'benchmark.py', '--config', config_usf_reproducible_path]
    results_path = 'tests/tmp/mnist/le_net/usf_test/results.json'
    # check=True is essential: without it a failed benchmark run leaves a
    # stale results.json behind and the comparison below passes vacuously.
    subprocess.run(cmd, check=True)
    results_first = pd.read_json(results_path)
    subprocess.run(cmd, check=True)
    results_second = pd.read_json(results_path)
    pd.testing.assert_frame_equal(results_first, results_second)
|
@pytest.fixture(scope='session')
def mode_types():
    """All training-mode identifiers exercised by the test suite."""
    return [
        'backpropagation',
        'fa',
        'dfa',
        'usf',
        'brsf',
        'frsf',
    ]
|
class Model(nn.Module):
    """Minimal conv net used as a dummy fixture: conv -> ReLU -> global
    average pool -> linear classifier over 10 classes.

    Expects a single-channel 4-D input ``(N, 1, H, W)`` with square spatial
    dims so the pool collapses to ``(N, 20, 1, 1)``.
    """
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 3)
        self.relu = nn.ReLU()
        self.fc = nn.Linear(20, 10)

    def forward(self, x):
        out = self.relu(self.conv1(x))
        # Global average pool over the full spatial extent -> (N, 20, 1, 1).
        out = F.avg_pool2d(out, out.size()[3])
        # BUG FIX: flatten to (N, 20) before the linear layer. Without this,
        # nn.Linear(20, 10) receives a trailing dimension of 1 and raises a
        # matmul shape error on every forward pass.
        out = out.view(out.size(0), -1)
        return self.fc(out)
|
@pytest.fixture(scope='function')
def dummy_net():
    """Provide a fresh, untrained Model instance for each test."""
    net = Model()
    return net
|
@pytest.fixture(scope='function')
def dummy_net_constructor():
    """Provide the Model class itself so tests can instantiate it lazily."""
    return Model
|
@pytest.fixture(scope='session')
def datasets_available():
    """Names of every dataset the selector is expected to support."""
    return [
        'mnist',
        'cifar10',
        'cifar10_benchmark',
        'cifar100',
        'fashion_mnist',
        'imagenet',
    ]
|
def test_datasets_implemented(datasets_available):
    """Every advertised dataset name must resolve to a truthy dataset object."""
    for name in datasets_available:
        selector = DatasetSelector(name)
        assert selector.get_dataset()
|
@pytest.fixture(scope='session')
def model_architectures():
    """Pairs of (architecture name, dummy input shape) used in smoke tests."""
    return [
        ('le_net_mnist', (1, 1, 32, 32)),
        ('le_net_cifar', (1, 3, 32, 32)),
        ('resnet18', (1, 3, 128, 128)),
        ('resnet20', (1, 3, 128, 128)),
        ('resnet56', (1, 3, 128, 128)),
    ]
|
def check_model(model, input_size):
    """Instantiate *model* and run one forward pass on random input.

    DFA-mode models need targets and a loss function threaded through
    ``forward``; everything else takes a bare tensor. Raises if the
    forward pass fails — no return value.
    """
    instance = model()
    is_dfa = ('mode' in vars(instance)) and (instance.mode == 'dfa')
    if is_dfa:
        _ = instance.forward(
            torch.rand(input_size),
            targets=torch.LongTensor([1]),
            loss_function=torch.nn.CrossEntropyLoss(),
        )
    else:
        _ = instance(torch.rand(input_size))
|
def test_backpropagation_models(model_architectures):
    """Smoke-test every backpropagation architecture with one forward pass."""
    for name, shape in model_architectures:
        check_model(vars(models.backpropagation)[name], shape)
|
def test_fa_models(model_architectures):
    """Smoke-test every feedback-alignment architecture with one forward pass."""
    for name, shape in model_architectures:
        check_model(vars(models.fa)[name], shape)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.