# (extraction artifact removed: dataset table header)
class AlexNet(nn.Module):
    """AlexNet with anti-aliased (blur-pool) downsampling.

    Args:
        num_classes (int): size of the final classification layer.
        filter_size (int): blur-filter size passed to Downsample.
        pool_only (bool): if True, keep the original stride-4 stem conv and
            anti-alias only the pooling stages.
        relu_first (bool): when the stem conv is blur-pooled, whether the ReLU
            comes before (True) or after (False) the Downsample.
    """

    def __init__(self, num_classes=1000, filter_size=1, pool_only=False, relu_first=True):
        super(AlexNet, self).__init__()
        # Stem: three mutually exclusive variants of the first downsampling step.
        if pool_only:
            stem = [nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2)]
        elif relu_first:
            stem = [
                nn.Conv2d(3, 64, kernel_size=11, stride=2, padding=2),
                nn.ReLU(inplace=True),
                Downsample(filt_size=filter_size, stride=2, channels=64),
            ]
        else:
            stem = [
                nn.Conv2d(3, 64, kernel_size=11, stride=2, padding=2),
                Downsample(filt_size=filter_size, stride=2, channels=64),
                nn.ReLU(inplace=True),
            ]
        # Remaining feature stages: every stride-2 max-pool is replaced by a
        # dense (stride-1) max-pool followed by an anti-aliased Downsample.
        stem.extend([
            nn.MaxPool2d(kernel_size=3, stride=1),
            Downsample(filt_size=filter_size, stride=2, channels=64),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=1),
            Downsample(filt_size=filter_size, stride=2, channels=192),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=1),
            Downsample(filt_size=filter_size, stride=2, channels=256),
        ])
        self.features = nn.Sequential(*stem)
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), 256 * 6 * 6)
        return self.classifier(x)
|
def alexnet(pretrained=False, **kwargs):
    """AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = AlexNet(**kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
    return net
|
class AlexNetNMP(nn.Module):
    """AlexNet variant whose pooling stages are hidden-mode Downsamples.

    NOTE(review): every Downsample here uses pad_off=-1 and hidden=True;
    the exact semantics depend on the Downsample implementation -- confirm
    against its definition.
    """

    def __init__(self, num_classes=1000, filter_size=1):
        super(AlexNetNMP, self).__init__()
        feats = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            Downsample(filt_size=filter_size, stride=2, channels=64, pad_off=-1, hidden=True),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            Downsample(filt_size=filter_size, stride=2, channels=192, pad_off=-1, hidden=True),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            Downsample(filt_size=filter_size, stride=2, channels=256, pad_off=-1, hidden=True),
        ]
        self.features = nn.Sequential(*feats)
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.avgpool(self.features(x))
        x = x.view(x.size(0), 256 * 6 * 6)
        return self.classifier(x)
|
def alexnetnmp(pretrained=False, **kwargs):
    """AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): loads the plain 'alexnet' checkpoint into AlexNetNMP --
    verify the state_dict keys actually line up for this variant.
    """
    net = AlexNetNMP(**kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
    return net
|
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
(self.add_module('norm1', nn.BatchNorm2d(num_input_features)),)
(self.add_module('relu1', nn.ReLU(inplace=True)),)
(self.add_module('conv1', nn.Conv2d(num_input_features, (bn_size * growth_rate), kernel_size=1, stride=1, bias=False)),)
(self.add_module('norm2', nn.BatchNorm2d((bn_size * growth_rate))),)
(self.add_module('relu2', nn.ReLU(inplace=True)),)
(self.add_module('conv2', nn.Conv2d((bn_size * growth_rate), growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),)
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if (self.drop_rate > 0):
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return torch.cat([x, new_features], 1)
|
class _DenseBlock(nn.Sequential):
    """Stack of `num_layers` _DenseLayers; layer i receives the original input
    plus the concatenated outputs of all preceding layers."""

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for idx in range(num_layers):
            in_feats = num_input_features + idx * growth_rate
            self.add_module('denselayer%d' % (idx + 1),
                            _DenseLayer(in_feats, growth_rate, bn_size, drop_rate))
|
class _Transition(nn.Sequential):
    """Transition between dense blocks: BN-ReLU-Conv1x1 channel reduction
    followed by an anti-aliased 2x spatial downsample."""

    def __init__(self, num_input_features, num_output_features, filter_size=1):
        super(_Transition, self).__init__()
        stages = [
            ('norm', nn.BatchNorm2d(num_input_features)),
            ('relu', nn.ReLU(inplace=True)),
            ('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)),
            ('pool', Downsample(filt_size=filter_size, stride=2, channels=num_output_features)),
        ]
        for name, module in stages:
            self.add_module(name, module)
|
class DenseNet(nn.Module):
    """Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
            (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        filter_size (int) - size of the anti-aliasing blur filter used by Downsample
        pool_only (bool) - if True the stride-2 stem conv is kept and only the
            max-pool is anti-aliased; if False the stem conv runs at stride 1
            and is followed by an extra Downsample
    """
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, filter_size=1, pool_only=True):
        super(DenseNet, self).__init__()
        # Stem: in both variants the max-pool itself is stride 1 and the actual
        # 2x downsampling is done by the blur-pool ('pool0').
        if pool_only:
            self.features = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), ('norm0', nn.BatchNorm2d(num_init_features)), ('relu0', nn.ReLU(inplace=True)), ('max0', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)), ('pool0', Downsample(filt_size=filter_size, stride=2, channels=num_init_features))]))
        else:
            self.features = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=1, padding=3, bias=False)), ('norm0', nn.BatchNorm2d(num_init_features)), ('relu0', nn.ReLU(inplace=True)), ('ds0', Downsample(filt_size=filter_size, stride=2, channels=num_init_features)), ('max0', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)), ('pool0', Downsample(filt_size=filter_size, stride=2, channels=num_init_features))]))
        # Dense blocks, each followed by a channel-halving transition except the last.
        # Module names ('denseblock%d', 'transition%d', 'norm5') must match the
        # torchvision checkpoints remapped by _load_state_dict.
        num_features = num_init_features
        for (i, num_layers) in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module(('denseblock%d' % (i + 1)), block)
            num_features = (num_features + (num_layers * growth_rate))
            if (i != (len(block_config) - 1)):
                trans = _Transition(num_input_features=num_features, num_output_features=(num_features // 2), filter_size=filter_size)
                self.features.add_module(('transition%d' % (i + 1)), trans)
                num_features = (num_features // 2)
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        self.classifier = nn.Linear(num_features, num_classes)
        # Weight init. Convs with in==out==groups and no bias are skipped --
        # presumably the fixed blur-filter convs inside Downsample; TODO confirm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if ((m.in_channels != m.out_channels) or (m.out_channels != m.groups) or (m.bias is not None)):
                    nn.init.kaiming_normal_(m.weight)
                else:
                    print('Not initializing')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                # NOTE(review): only the bias is initialized; the Linear weight
                # keeps PyTorch's default init (matches upstream torchvision).
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # features -> ReLU -> global average pool -> flatten -> classifier
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), (- 1))
        out = self.classifier(out)
        return out
|
def _load_state_dict(model, model_url):
    """Download a torchvision DenseNet checkpoint and load it into `model`.

    Old checkpoints name dense-layer sub-modules with an extra dot
    (e.g. 'norm.1'); the dot is stripped so keys match the current
    'norm1'/'conv2' naming before load_state_dict is called.
    """
    pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
    state_dict = model_zoo.load_url(model_url)
    for old_key in list(state_dict.keys()):
        match = pattern.match(old_key)
        if match is None:
            continue
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    model.load_state_dict(state_dict)
|
def densenet121(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to DenseNet
    """
    net = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
                   filter_size=filter_size, pool_only=pool_only, **kwargs)
    if pretrained:
        _load_state_dict(net, model_urls['densenet121'])
    return net
|
def densenet169(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to DenseNet
    """
    net = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32),
                   filter_size=filter_size, pool_only=pool_only, **kwargs)
    if pretrained:
        _load_state_dict(net, model_urls['densenet169'])
    return net
|
def densenet201(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to DenseNet
    """
    net = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32),
                   filter_size=filter_size, pool_only=pool_only, **kwargs)
    if pretrained:
        _load_state_dict(net, model_urls['densenet201'])
    return net
|
def densenet161(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to DenseNet
    """
    net = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24),
                   filter_size=filter_size, pool_only=pool_only, **kwargs)
    if pretrained:
        _load_state_dict(net, model_urls['densenet161'])
    return net
|
class ConvBNReLU(nn.Sequential):
    """Conv2d (no bias) -> BatchNorm2d -> ReLU6, with 'same'-style padding."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        pad = (kernel_size - 1) // 2
        conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad, groups=groups, bias=False)
        super(ConvBNReLU, self).__init__(conv, nn.BatchNorm2d(out_planes), nn.ReLU6(inplace=True))
|
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block; stride-2 variants run the depthwise
    conv at stride 1 and downsample with an anti-aliased blur-pool instead."""

    def __init__(self, inp, oup, stride, expand_ratio, filter_size=1):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(round(inp * expand_ratio))
        # Residual shortcut only when spatial size and channels are preserved.
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        body = []
        if expand_ratio != 1:
            # pointwise channel expansion
            body.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        if stride == 1:
            body.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim))
        else:
            # depthwise conv at stride 1, then blur-pool to downsample
            body.append(ConvBNReLU(hidden_dim, hidden_dim, stride=1, groups=hidden_dim))
            body.append(Downsample(filt_size=filter_size, stride=stride, channels=hidden_dim))
        # linear (no activation) pointwise projection
        body.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        body.append(nn.BatchNorm2d(oup))
        self.conv = nn.Sequential(*body)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
|
class MobileNetV2(nn.Module):
    """MobileNetV2 with anti-aliased strided blocks.

    Args:
        num_classes (int): size of the classifier output.
        width_mult (float): channel-width multiplier.
        filter_size (int): blur-filter size for the anti-aliased downsampling.
    """

    def __init__(self, num_classes=1000, width_mult=1.0, filter_size=1):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = int(32 * width_mult)
        self.last_channel = int(1280 * max(1.0, width_mult))
        # Each row: (expand_ratio t, channels c, num_blocks n, first stride s).
        inverted_residual_setting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        stages = [ConvBNReLU(3, input_channel, stride=2)]
        for expand_ratio, channels, repeats, first_stride in inverted_residual_setting:
            out_ch = int(channels * width_mult)
            for block_idx in range(repeats):
                # Only the first block of a stage downsamples.
                stride = first_stride if block_idx == 0 else 1
                stages.append(block(input_channel, out_ch, stride,
                                    expand_ratio=expand_ratio, filter_size=filter_size))
                input_channel = out_ch
        stages.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*stages)
        self.classifier = nn.Sequential(nn.Linear(self.last_channel, num_classes))
        self._reset_parameters()

    def _reset_parameters(self):
        # Weight init identical to the torchvision reference implementation.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.features(x)
        x = x.mean([2, 3])  # global average pool over H, W
        return self.classifier(x)
|
def mobilenet_v2(pretrained=False, progress=True, filter_size=1, **kwargs):
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr

    NOTE(review): `pretrained` and `progress` are accepted but currently
    ignored -- no checkpoint loading is implemented for this variant.
    """
    return MobileNetV2(filter_size=filter_size, **kwargs)
|
def conv3x3(in_planes, out_planes, stride=1, groups=1):
    """3x3 convolution with padding (bias-free, padding=1 keeps spatial size at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=groups,
        bias=False,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (bias-free channel projection)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
|
class BasicBlock(nn.Module):
    """ResNet basic block (3x3 -> 3x3); stride-2 variants blur-pool before the
    second conv instead of striding the first conv."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, norm_layer=None, filter_size=1):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1:
            raise ValueError('BasicBlock only supports groups=1')
        self.conv1 = conv3x3(inplanes, planes)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        if stride != 1:
            # anti-aliased downsample in front of the second 3x3 conv
            self.conv2 = nn.Sequential(
                Downsample(filt_size=filter_size, stride=stride, channels=planes),
                conv3x3(planes, planes),
            )
        else:
            self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Projection shortcut when shape changes, identity otherwise.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1); stride-2 variants blur-pool
    before the final 1x1 conv instead of striding the 3x3 conv.

    Bug fix: the 3x3 conv previously received `groups` as the positional
    `stride` argument (conv3x3 signature is (in, out, stride=1, groups=1)),
    so grouped (ResNeXt-style) configurations silently built a
    stride-`groups`, groups=1 convolution. `groups` is now passed by keyword.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, norm_layer=None, filter_size=1):
        super(Bottleneck, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = norm_layer(planes)
        # groups must be keyword here -- the third positional arg is stride.
        self.conv2 = conv3x3(planes, planes, groups=groups)
        self.bn2 = norm_layer(planes)
        if (stride == 1):
            self.conv3 = conv1x1(planes, (planes * self.expansion))
        else:
            # anti-aliased downsample, then channel-expanding 1x1 conv
            self.conv3 = nn.Sequential(
                Downsample(filt_size=filter_size, stride=stride, channels=planes),
                conv1x1(planes, (planes * self.expansion)),
            )
        self.bn3 = norm_layer((planes * self.expansion))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Projection shortcut when provided (shape change), identity otherwise.
        if (self.downsample is not None):
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
|
class ResNet(nn.Module):
    """ResNet/ResNeXt backbone with anti-aliased (blur-pool) downsampling.

    Args:
        block: BasicBlock or Bottleneck.
        layers (list of 4 ints): number of blocks in each of the four stages.
        num_classes (int): size of the final fully-connected layer.
        zero_init_residual (bool): zero-init the last BN of each block so the
            residual branch starts as identity.
        groups (int), width_per_group (int): ResNeXt-style grouping; stage
            widths are width_per_group * groups * 2**i.
        norm_layer: normalization constructor (default nn.BatchNorm2d).
        filter_size (int): blur-filter size for Downsample.
        pool_only (bool): if True only the max-pool is anti-aliased; if False
            the stride-2 stem conv is also replaced by conv + blur-pool.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, norm_layer=None, filter_size=1, pool_only=True):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        # Per-stage widths (64/128/256/512 when groups*width_per_group == 64).
        planes = [int(((width_per_group * groups) * (2 ** i))) for i in range(4)]
        self.inplanes = planes[0]
        # Stem conv: stride 2 when pool_only, else stride 1 (blur-pool below
        # takes over the downsampling).
        if pool_only:
            self.conv1 = nn.Conv2d(3, planes[0], kernel_size=7, stride=2, padding=3, bias=False)
        else:
            self.conv1 = nn.Conv2d(3, planes[0], kernel_size=7, stride=1, padding=3, bias=False)
        self.bn1 = norm_layer(planes[0])
        self.relu = nn.ReLU(inplace=True)
        # Anti-aliased max-pool: dense (stride-1) max followed by blur-pool.
        if pool_only:
            self.maxpool = nn.Sequential(*[nn.MaxPool2d(kernel_size=2, stride=1), Downsample(filt_size=filter_size, stride=2, channels=planes[0])])
        else:
            self.maxpool = nn.Sequential(*[Downsample(filt_size=filter_size, stride=2, channels=planes[0]), nn.MaxPool2d(kernel_size=2, stride=1), Downsample(filt_size=filter_size, stride=2, channels=planes[0])])
        self.layer1 = self._make_layer(block, planes[0], layers[0], groups=groups, norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, planes[1], layers[1], stride=2, groups=groups, norm_layer=norm_layer, filter_size=filter_size)
        self.layer3 = self._make_layer(block, planes[2], layers[2], stride=2, groups=groups, norm_layer=norm_layer, filter_size=filter_size)
        self.layer4 = self._make_layer(block, planes[3], layers[3], stride=2, groups=groups, norm_layer=norm_layer, filter_size=filter_size)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((planes[3] * block.expansion), num_classes)
        # Weight init. Convs with in==out==groups and no bias are skipped --
        # presumably the fixed blur-filter convs inside Downsample; TODO confirm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if ((m.in_channels != m.out_channels) or (m.out_channels != m.groups) or (m.bias is not None)):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                else:
                    print('Not initializing')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init the last BN in each block so each residual branch starts
        # as zero and the block behaves like identity at the start of training.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, groups=1, norm_layer=None, filter_size=1):
        # Builds one stage: an optional projection shortcut (blur-pool when
        # strided, then 1x1 conv + norm), a first block that may downsample,
        # and (blocks - 1) stride-1 blocks. Updates self.inplanes as it goes.
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = ([Downsample(filt_size=filter_size, stride=stride, channels=self.inplanes)] if (stride != 1) else [])
            downsample += [conv1x1(self.inplanes, (planes * block.expansion), 1), norm_layer((planes * block.expansion))]
            downsample = nn.Sequential(*downsample)
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, groups, norm_layer, filter_size=filter_size))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=groups, norm_layer=norm_layer, filter_size=filter_size))
        return nn.Sequential(*layers)
    def forward(self, x):
        # stem -> 4 stages -> global average pool -> flatten -> classifier
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
|
def resnet18(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to ResNet
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], filter_size=filter_size, pool_only=pool_only, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return net
|
def resnet34(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to ResNet
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], filter_size=filter_size, pool_only=pool_only, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return net
|
def resnet50(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to ResNet
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], filter_size=filter_size, pool_only=pool_only, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return net
|
def resnet101(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to ResNet
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], filter_size=filter_size, pool_only=pool_only, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return net
|
def resnet152(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to ResNet
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], filter_size=filter_size, pool_only=pool_only, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return net
|
def resnext50_32x4d(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Constructs a ResNeXt-50 32x4d model (32 groups, width 4 per group).

    Bug fix: `groups` and `width_per_group` were swapped (groups=4,
    width_per_group=32) -- the same transposition as an early torchvision bug.
    "32x4d" means 32 groups of width 4. Overall channel widths are unchanged
    (the product groups * width_per_group is identical); only the grouping of
    the 3x3 convolutions is corrected.

    Args:
        pretrained (bool): accepted for API symmetry; no checkpoint is wired up here
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to ResNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], groups=32, width_per_group=4,
                   filter_size=filter_size, pool_only=pool_only, **kwargs)
    return model
|
def resnext101_32x8d(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Constructs a ResNeXt-101 32x8d model (32 groups, width 8 per group).

    Bug fix: `groups` and `width_per_group` were swapped (groups=8,
    width_per_group=32). "32x8d" means 32 groups of width 8. Overall channel
    widths are unchanged (the product is identical); only the grouping of the
    3x3 convolutions is corrected.

    Args:
        pretrained (bool): accepted for API symmetry; no checkpoint is wired up here
        filter_size (int): anti-aliasing blur filter size
        pool_only (bool): stem downsampling variant, passed through to ResNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], groups=32, width_per_group=8,
                   filter_size=filter_size, pool_only=pool_only, **kwargs)
    return model
|
class VGG(nn.Module):
    """VGG classifier head over an arbitrary feature extractor.

    Args:
        features (nn.Module): convolutional feature extractor (see make_layers).
        num_classes (int): size of the final fully-connected layer.
        init_weights (bool): run the torchvision-style weight initialization.
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        head = [
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.avgpool(self.features(x))
        x = x.view(x.size(0), -1)
        return self.classifier(x)

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Convs with in==out==groups and no bias are skipped --
                # presumably the fixed blur-filter convs; TODO confirm.
                learned = (m.in_channels != m.out_channels) or (m.out_channels != m.groups) or (m.bias is not None)
                if learned:
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
                else:
                    print('Not initializing')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
|
def make_layers(cfg, batch_norm=False, filter_size=1):
    """Build the VGG feature extractor from a config list.

    'M' entries become a dense (stride-1) max-pool followed by an
    anti-aliased Downsample; integer entries become a 3x3 conv
    (+ optional BatchNorm) + ReLU with that many output channels.
    """
    layers = []
    in_channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=1))
            layers.append(Downsample(filt_size=filter_size, stride=2, channels=in_channels))
            continue
        layers.append(nn.Conv2d(in_channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.ReLU(inplace=True))
        in_channels = spec
    return nn.Sequential(*layers)
|
def vgg11(pretrained=False, filter_size=1, **kwargs):
    """VGG 11-layer model (configuration "A")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
    """
    if pretrained:
        kwargs['init_weights'] = False  # weights come from the checkpoint
    net = VGG(make_layers(cfg['A'], filter_size=filter_size), **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['vgg11']))
    return net
|
def vgg11_bn(pretrained=False, filter_size=1, **kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
    """
    if pretrained:
        kwargs['init_weights'] = False  # weights come from the checkpoint
    net = VGG(make_layers(cfg['A'], filter_size=filter_size, batch_norm=True), **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
    return net
|
def vgg13(pretrained=False, filter_size=1, **kwargs):
    """VGG 13-layer model (configuration "B")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
    """
    if pretrained:
        kwargs['init_weights'] = False  # weights come from the checkpoint
    net = VGG(make_layers(cfg['B'], filter_size=filter_size), **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
    return net
|
def vgg13_bn(pretrained=False, filter_size=1, **kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
    """
    if pretrained:
        kwargs['init_weights'] = False  # weights come from the checkpoint
    net = VGG(make_layers(cfg['B'], filter_size=filter_size, batch_norm=True), **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))
    return net
|
def vgg16(pretrained=False, filter_size=1, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
    """
    if pretrained:
        kwargs['init_weights'] = False  # weights come from the checkpoint
    net = VGG(make_layers(cfg['D'], filter_size=filter_size), **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return net
|
def vgg16_bn(pretrained=False, filter_size=1, **kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
    """
    if pretrained:
        kwargs['init_weights'] = False  # weights come from the checkpoint
    net = VGG(make_layers(cfg['D'], filter_size=filter_size, batch_norm=True), **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
    return net
|
def vgg19(pretrained=False, filter_size=1, **kwargs):
    """VGG 19-layer model (configuration "E")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        filter_size (int): anti-aliasing blur filter size
    """
    if pretrained:
        kwargs['init_weights'] = False  # weights come from the checkpoint
    net = VGG(make_layers(cfg['E'], filter_size=filter_size), **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['vgg19']))
    return net
|
def vgg19_bn(pretrained=False, filter_size=1, **kwargs):
"VGG 19-layer model (configuration 'E') with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n "
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['E'], filter_size=filter_size, batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))
return model
|
def load_weights(weight_file):
    """Load a pickled numpy weight dictionary from disk.

    Args:
        weight_file (str or None): path to a .npy file containing a pickled
            dict; None returns None (no weights).

    Returns:
        dict or None: the deserialized weight dictionary.

    Fixes: `== None` -> `is None`; the bare `except:` (which swallowed
    every error, including KeyboardInterrupt) is narrowed to TypeError,
    the error older numpy versions raise when `allow_pickle` is not a
    recognized keyword.
    """
    if weight_file is None:
        return None
    try:
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except TypeError:
        # numpy < 1.16 has no allow_pickle kwarg; fall back to the old API.
        weights_dict = np.load(weight_file, encoding='bytes').item()
    return weights_dict
|
class KitModel(nn.Module):
def __init__(self, weight_file):
super(KitModel, self).__init__()
global __weights_dict
__weights_dict = load_weights(weight_file)
self.bn_data = self.__batch_normalization(2, 'bn_data', num_features=3, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.conv0 = self.__conv(2, name='conv0', in_channels=3, out_channels=64, kernel_size=(7, 7), stride=(2, 2), groups=1, bias=False)
self.bn0 = self.__batch_normalization(2, 'bn0', num_features=64, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit1_bn1 = self.__batch_normalization(2, 'stage1_unit1_bn1', num_features=64, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit1_conv1 = self.__conv(2, name='stage1_unit1_conv1', in_channels=64, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage1_unit1_sc = self.__conv(2, name='stage1_unit1_sc', in_channels=64, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage1_unit1_bn2 = self.__batch_normalization(2, 'stage1_unit1_bn2', num_features=64, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit1_conv2 = self.__conv(2, name='stage1_unit1_conv2', in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage1_unit1_bn3 = self.__batch_normalization(2, 'stage1_unit1_bn3', num_features=64, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit1_conv3 = self.__conv(2, name='stage1_unit1_conv3', in_channels=64, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage1_unit2_bn1 = self.__batch_normalization(2, 'stage1_unit2_bn1', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit2_conv1 = self.__conv(2, name='stage1_unit2_conv1', in_channels=256, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage1_unit2_bn2 = self.__batch_normalization(2, 'stage1_unit2_bn2', num_features=64, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit2_conv2 = self.__conv(2, name='stage1_unit2_conv2', in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage1_unit2_bn3 = self.__batch_normalization(2, 'stage1_unit2_bn3', num_features=64, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit2_conv3 = self.__conv(2, name='stage1_unit2_conv3', in_channels=64, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage1_unit3_bn1 = self.__batch_normalization(2, 'stage1_unit3_bn1', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit3_conv1 = self.__conv(2, name='stage1_unit3_conv1', in_channels=256, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage1_unit3_bn2 = self.__batch_normalization(2, 'stage1_unit3_bn2', num_features=64, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit3_conv2 = self.__conv(2, name='stage1_unit3_conv2', in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage1_unit3_bn3 = self.__batch_normalization(2, 'stage1_unit3_bn3', num_features=64, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage1_unit3_conv3 = self.__conv(2, name='stage1_unit3_conv3', in_channels=64, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit1_bn1 = self.__batch_normalization(2, 'stage2_unit1_bn1', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit1_conv1 = self.__conv(2, name='stage2_unit1_conv1', in_channels=256, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit1_sc = self.__conv(2, name='stage2_unit1_sc', in_channels=256, out_channels=512, kernel_size=(1, 1), stride=(2, 2), groups=1, bias=False)
self.stage2_unit1_bn2 = self.__batch_normalization(2, 'stage2_unit1_bn2', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit1_conv2 = self.__conv(2, name='stage2_unit1_conv2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=False)
self.stage2_unit1_bn3 = self.__batch_normalization(2, 'stage2_unit1_bn3', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit1_conv3 = self.__conv(2, name='stage2_unit1_conv3', in_channels=128, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit2_bn1 = self.__batch_normalization(2, 'stage2_unit2_bn1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit2_conv1 = self.__conv(2, name='stage2_unit2_conv1', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit2_bn2 = self.__batch_normalization(2, 'stage2_unit2_bn2', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit2_conv2 = self.__conv(2, name='stage2_unit2_conv2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage2_unit2_bn3 = self.__batch_normalization(2, 'stage2_unit2_bn3', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit2_conv3 = self.__conv(2, name='stage2_unit2_conv3', in_channels=128, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit3_bn1 = self.__batch_normalization(2, 'stage2_unit3_bn1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit3_conv1 = self.__conv(2, name='stage2_unit3_conv1', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit3_bn2 = self.__batch_normalization(2, 'stage2_unit3_bn2', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit3_conv2 = self.__conv(2, name='stage2_unit3_conv2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage2_unit3_bn3 = self.__batch_normalization(2, 'stage2_unit3_bn3', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit3_conv3 = self.__conv(2, name='stage2_unit3_conv3', in_channels=128, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit4_bn1 = self.__batch_normalization(2, 'stage2_unit4_bn1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit4_conv1 = self.__conv(2, name='stage2_unit4_conv1', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit4_bn2 = self.__batch_normalization(2, 'stage2_unit4_bn2', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit4_conv2 = self.__conv(2, name='stage2_unit4_conv2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage2_unit4_bn3 = self.__batch_normalization(2, 'stage2_unit4_bn3', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit4_conv3 = self.__conv(2, name='stage2_unit4_conv3', in_channels=128, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit5_bn1 = self.__batch_normalization(2, 'stage2_unit5_bn1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit5_conv1 = self.__conv(2, name='stage2_unit5_conv1', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit5_bn2 = self.__batch_normalization(2, 'stage2_unit5_bn2', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit5_conv2 = self.__conv(2, name='stage2_unit5_conv2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage2_unit5_bn3 = self.__batch_normalization(2, 'stage2_unit5_bn3', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit5_conv3 = self.__conv(2, name='stage2_unit5_conv3', in_channels=128, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit6_bn1 = self.__batch_normalization(2, 'stage2_unit6_bn1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit6_conv1 = self.__conv(2, name='stage2_unit6_conv1', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit6_bn2 = self.__batch_normalization(2, 'stage2_unit6_bn2', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit6_conv2 = self.__conv(2, name='stage2_unit6_conv2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage2_unit6_bn3 = self.__batch_normalization(2, 'stage2_unit6_bn3', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit6_conv3 = self.__conv(2, name='stage2_unit6_conv3', in_channels=128, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit7_bn1 = self.__batch_normalization(2, 'stage2_unit7_bn1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit7_conv1 = self.__conv(2, name='stage2_unit7_conv1', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit7_bn2 = self.__batch_normalization(2, 'stage2_unit7_bn2', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit7_conv2 = self.__conv(2, name='stage2_unit7_conv2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage2_unit7_bn3 = self.__batch_normalization(2, 'stage2_unit7_bn3', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit7_conv3 = self.__conv(2, name='stage2_unit7_conv3', in_channels=128, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit8_bn1 = self.__batch_normalization(2, 'stage2_unit8_bn1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit8_conv1 = self.__conv(2, name='stage2_unit8_conv1', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage2_unit8_bn2 = self.__batch_normalization(2, 'stage2_unit8_bn2', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit8_conv2 = self.__conv(2, name='stage2_unit8_conv2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage2_unit8_bn3 = self.__batch_normalization(2, 'stage2_unit8_bn3', num_features=128, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage2_unit8_conv3 = self.__conv(2, name='stage2_unit8_conv3', in_channels=128, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit1_bn1 = self.__batch_normalization(2, 'stage3_unit1_bn1', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit1_conv1 = self.__conv(2, name='stage3_unit1_conv1', in_channels=512, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit1_sc = self.__conv(2, name='stage3_unit1_sc', in_channels=512, out_channels=1024, kernel_size=(1, 1), stride=(2, 2), groups=1, bias=False)
self.stage3_unit1_bn2 = self.__batch_normalization(2, 'stage3_unit1_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit1_conv2 = self.__conv(2, name='stage3_unit1_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=False)
self.stage3_unit1_bn3 = self.__batch_normalization(2, 'stage3_unit1_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit1_conv3 = self.__conv(2, name='stage3_unit1_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit2_bn1 = self.__batch_normalization(2, 'stage3_unit2_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit2_conv1 = self.__conv(2, name='stage3_unit2_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit2_bn2 = self.__batch_normalization(2, 'stage3_unit2_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit2_conv2 = self.__conv(2, name='stage3_unit2_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit2_bn3 = self.__batch_normalization(2, 'stage3_unit2_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit2_conv3 = self.__conv(2, name='stage3_unit2_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit3_bn1 = self.__batch_normalization(2, 'stage3_unit3_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit3_conv1 = self.__conv(2, name='stage3_unit3_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit3_bn2 = self.__batch_normalization(2, 'stage3_unit3_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit3_conv2 = self.__conv(2, name='stage3_unit3_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit3_bn3 = self.__batch_normalization(2, 'stage3_unit3_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit3_conv3 = self.__conv(2, name='stage3_unit3_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit4_bn1 = self.__batch_normalization(2, 'stage3_unit4_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit4_conv1 = self.__conv(2, name='stage3_unit4_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit4_bn2 = self.__batch_normalization(2, 'stage3_unit4_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit4_conv2 = self.__conv(2, name='stage3_unit4_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit4_bn3 = self.__batch_normalization(2, 'stage3_unit4_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit4_conv3 = self.__conv(2, name='stage3_unit4_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit5_bn1 = self.__batch_normalization(2, 'stage3_unit5_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit5_conv1 = self.__conv(2, name='stage3_unit5_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit5_bn2 = self.__batch_normalization(2, 'stage3_unit5_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit5_conv2 = self.__conv(2, name='stage3_unit5_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit5_bn3 = self.__batch_normalization(2, 'stage3_unit5_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit5_conv3 = self.__conv(2, name='stage3_unit5_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit6_bn1 = self.__batch_normalization(2, 'stage3_unit6_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit6_conv1 = self.__conv(2, name='stage3_unit6_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit6_bn2 = self.__batch_normalization(2, 'stage3_unit6_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit6_conv2 = self.__conv(2, name='stage3_unit6_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit6_bn3 = self.__batch_normalization(2, 'stage3_unit6_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit6_conv3 = self.__conv(2, name='stage3_unit6_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit7_bn1 = self.__batch_normalization(2, 'stage3_unit7_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit7_conv1 = self.__conv(2, name='stage3_unit7_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit7_bn2 = self.__batch_normalization(2, 'stage3_unit7_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit7_conv2 = self.__conv(2, name='stage3_unit7_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit7_bn3 = self.__batch_normalization(2, 'stage3_unit7_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit7_conv3 = self.__conv(2, name='stage3_unit7_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit8_bn1 = self.__batch_normalization(2, 'stage3_unit8_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit8_conv1 = self.__conv(2, name='stage3_unit8_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit8_bn2 = self.__batch_normalization(2, 'stage3_unit8_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit8_conv2 = self.__conv(2, name='stage3_unit8_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit8_bn3 = self.__batch_normalization(2, 'stage3_unit8_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit8_conv3 = self.__conv(2, name='stage3_unit8_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit9_bn1 = self.__batch_normalization(2, 'stage3_unit9_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit9_conv1 = self.__conv(2, name='stage3_unit9_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit9_bn2 = self.__batch_normalization(2, 'stage3_unit9_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit9_conv2 = self.__conv(2, name='stage3_unit9_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit9_bn3 = self.__batch_normalization(2, 'stage3_unit9_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit9_conv3 = self.__conv(2, name='stage3_unit9_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit10_bn1 = self.__batch_normalization(2, 'stage3_unit10_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit10_conv1 = self.__conv(2, name='stage3_unit10_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit10_bn2 = self.__batch_normalization(2, 'stage3_unit10_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit10_conv2 = self.__conv(2, name='stage3_unit10_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit10_bn3 = self.__batch_normalization(2, 'stage3_unit10_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit10_conv3 = self.__conv(2, name='stage3_unit10_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit11_bn1 = self.__batch_normalization(2, 'stage3_unit11_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit11_conv1 = self.__conv(2, name='stage3_unit11_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit11_bn2 = self.__batch_normalization(2, 'stage3_unit11_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit11_conv2 = self.__conv(2, name='stage3_unit11_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit11_bn3 = self.__batch_normalization(2, 'stage3_unit11_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit11_conv3 = self.__conv(2, name='stage3_unit11_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit12_bn1 = self.__batch_normalization(2, 'stage3_unit12_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit12_conv1 = self.__conv(2, name='stage3_unit12_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit12_bn2 = self.__batch_normalization(2, 'stage3_unit12_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit12_conv2 = self.__conv(2, name='stage3_unit12_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit12_bn3 = self.__batch_normalization(2, 'stage3_unit12_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit12_conv3 = self.__conv(2, name='stage3_unit12_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit13_bn1 = self.__batch_normalization(2, 'stage3_unit13_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit13_conv1 = self.__conv(2, name='stage3_unit13_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit13_bn2 = self.__batch_normalization(2, 'stage3_unit13_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit13_conv2 = self.__conv(2, name='stage3_unit13_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit13_bn3 = self.__batch_normalization(2, 'stage3_unit13_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit13_conv3 = self.__conv(2, name='stage3_unit13_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit14_bn1 = self.__batch_normalization(2, 'stage3_unit14_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit14_conv1 = self.__conv(2, name='stage3_unit14_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit14_bn2 = self.__batch_normalization(2, 'stage3_unit14_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit14_conv2 = self.__conv(2, name='stage3_unit14_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit14_bn3 = self.__batch_normalization(2, 'stage3_unit14_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit14_conv3 = self.__conv(2, name='stage3_unit14_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit15_bn1 = self.__batch_normalization(2, 'stage3_unit15_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit15_conv1 = self.__conv(2, name='stage3_unit15_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit15_bn2 = self.__batch_normalization(2, 'stage3_unit15_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit15_conv2 = self.__conv(2, name='stage3_unit15_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit15_bn3 = self.__batch_normalization(2, 'stage3_unit15_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit15_conv3 = self.__conv(2, name='stage3_unit15_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit16_bn1 = self.__batch_normalization(2, 'stage3_unit16_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit16_conv1 = self.__conv(2, name='stage3_unit16_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit16_bn2 = self.__batch_normalization(2, 'stage3_unit16_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit16_conv2 = self.__conv(2, name='stage3_unit16_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit16_bn3 = self.__batch_normalization(2, 'stage3_unit16_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit16_conv3 = self.__conv(2, name='stage3_unit16_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit17_bn1 = self.__batch_normalization(2, 'stage3_unit17_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit17_conv1 = self.__conv(2, name='stage3_unit17_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit17_bn2 = self.__batch_normalization(2, 'stage3_unit17_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit17_conv2 = self.__conv(2, name='stage3_unit17_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit17_bn3 = self.__batch_normalization(2, 'stage3_unit17_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit17_conv3 = self.__conv(2, name='stage3_unit17_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit18_bn1 = self.__batch_normalization(2, 'stage3_unit18_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit18_conv1 = self.__conv(2, name='stage3_unit18_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit18_bn2 = self.__batch_normalization(2, 'stage3_unit18_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit18_conv2 = self.__conv(2, name='stage3_unit18_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit18_bn3 = self.__batch_normalization(2, 'stage3_unit18_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit18_conv3 = self.__conv(2, name='stage3_unit18_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit19_bn1 = self.__batch_normalization(2, 'stage3_unit19_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit19_conv1 = self.__conv(2, name='stage3_unit19_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit19_bn2 = self.__batch_normalization(2, 'stage3_unit19_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit19_conv2 = self.__conv(2, name='stage3_unit19_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit19_bn3 = self.__batch_normalization(2, 'stage3_unit19_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit19_conv3 = self.__conv(2, name='stage3_unit19_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit20_bn1 = self.__batch_normalization(2, 'stage3_unit20_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit20_conv1 = self.__conv(2, name='stage3_unit20_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit20_bn2 = self.__batch_normalization(2, 'stage3_unit20_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit20_conv2 = self.__conv(2, name='stage3_unit20_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit20_bn3 = self.__batch_normalization(2, 'stage3_unit20_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit20_conv3 = self.__conv(2, name='stage3_unit20_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit21_bn1 = self.__batch_normalization(2, 'stage3_unit21_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit21_conv1 = self.__conv(2, name='stage3_unit21_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit21_bn2 = self.__batch_normalization(2, 'stage3_unit21_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit21_conv2 = self.__conv(2, name='stage3_unit21_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit21_bn3 = self.__batch_normalization(2, 'stage3_unit21_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit21_conv3 = self.__conv(2, name='stage3_unit21_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit22_bn1 = self.__batch_normalization(2, 'stage3_unit22_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit22_conv1 = self.__conv(2, name='stage3_unit22_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit22_bn2 = self.__batch_normalization(2, 'stage3_unit22_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit22_conv2 = self.__conv(2, name='stage3_unit22_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit22_bn3 = self.__batch_normalization(2, 'stage3_unit22_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit22_conv3 = self.__conv(2, name='stage3_unit22_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit23_bn1 = self.__batch_normalization(2, 'stage3_unit23_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit23_conv1 = self.__conv(2, name='stage3_unit23_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit23_bn2 = self.__batch_normalization(2, 'stage3_unit23_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit23_conv2 = self.__conv(2, name='stage3_unit23_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit23_bn3 = self.__batch_normalization(2, 'stage3_unit23_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit23_conv3 = self.__conv(2, name='stage3_unit23_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit24_bn1 = self.__batch_normalization(2, 'stage3_unit24_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit24_conv1 = self.__conv(2, name='stage3_unit24_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit24_bn2 = self.__batch_normalization(2, 'stage3_unit24_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit24_conv2 = self.__conv(2, name='stage3_unit24_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit24_bn3 = self.__batch_normalization(2, 'stage3_unit24_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit24_conv3 = self.__conv(2, name='stage3_unit24_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit25_bn1 = self.__batch_normalization(2, 'stage3_unit25_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit25_conv1 = self.__conv(2, name='stage3_unit25_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit25_bn2 = self.__batch_normalization(2, 'stage3_unit25_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit25_conv2 = self.__conv(2, name='stage3_unit25_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit25_bn3 = self.__batch_normalization(2, 'stage3_unit25_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit25_conv3 = self.__conv(2, name='stage3_unit25_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit26_bn1 = self.__batch_normalization(2, 'stage3_unit26_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit26_conv1 = self.__conv(2, name='stage3_unit26_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit26_bn2 = self.__batch_normalization(2, 'stage3_unit26_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit26_conv2 = self.__conv(2, name='stage3_unit26_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit26_bn3 = self.__batch_normalization(2, 'stage3_unit26_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit26_conv3 = self.__conv(2, name='stage3_unit26_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit27_bn1 = self.__batch_normalization(2, 'stage3_unit27_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit27_conv1 = self.__conv(2, name='stage3_unit27_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit27_bn2 = self.__batch_normalization(2, 'stage3_unit27_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit27_conv2 = self.__conv(2, name='stage3_unit27_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit27_bn3 = self.__batch_normalization(2, 'stage3_unit27_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit27_conv3 = self.__conv(2, name='stage3_unit27_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit28_bn1 = self.__batch_normalization(2, 'stage3_unit28_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit28_conv1 = self.__conv(2, name='stage3_unit28_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit28_bn2 = self.__batch_normalization(2, 'stage3_unit28_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit28_conv2 = self.__conv(2, name='stage3_unit28_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit28_bn3 = self.__batch_normalization(2, 'stage3_unit28_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit28_conv3 = self.__conv(2, name='stage3_unit28_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit29_bn1 = self.__batch_normalization(2, 'stage3_unit29_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit29_conv1 = self.__conv(2, name='stage3_unit29_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit29_bn2 = self.__batch_normalization(2, 'stage3_unit29_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit29_conv2 = self.__conv(2, name='stage3_unit29_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit29_bn3 = self.__batch_normalization(2, 'stage3_unit29_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit29_conv3 = self.__conv(2, name='stage3_unit29_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit30_bn1 = self.__batch_normalization(2, 'stage3_unit30_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit30_conv1 = self.__conv(2, name='stage3_unit30_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit30_bn2 = self.__batch_normalization(2, 'stage3_unit30_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit30_conv2 = self.__conv(2, name='stage3_unit30_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit30_bn3 = self.__batch_normalization(2, 'stage3_unit30_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit30_conv3 = self.__conv(2, name='stage3_unit30_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit31_bn1 = self.__batch_normalization(2, 'stage3_unit31_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit31_conv1 = self.__conv(2, name='stage3_unit31_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit31_bn2 = self.__batch_normalization(2, 'stage3_unit31_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit31_conv2 = self.__conv(2, name='stage3_unit31_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit31_bn3 = self.__batch_normalization(2, 'stage3_unit31_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit31_conv3 = self.__conv(2, name='stage3_unit31_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit32_bn1 = self.__batch_normalization(2, 'stage3_unit32_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit32_conv1 = self.__conv(2, name='stage3_unit32_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit32_bn2 = self.__batch_normalization(2, 'stage3_unit32_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit32_conv2 = self.__conv(2, name='stage3_unit32_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit32_bn3 = self.__batch_normalization(2, 'stage3_unit32_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit32_conv3 = self.__conv(2, name='stage3_unit32_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit33_bn1 = self.__batch_normalization(2, 'stage3_unit33_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit33_conv1 = self.__conv(2, name='stage3_unit33_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit33_bn2 = self.__batch_normalization(2, 'stage3_unit33_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit33_conv2 = self.__conv(2, name='stage3_unit33_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit33_bn3 = self.__batch_normalization(2, 'stage3_unit33_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit33_conv3 = self.__conv(2, name='stage3_unit33_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit34_bn1 = self.__batch_normalization(2, 'stage3_unit34_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit34_conv1 = self.__conv(2, name='stage3_unit34_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit34_bn2 = self.__batch_normalization(2, 'stage3_unit34_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit34_conv2 = self.__conv(2, name='stage3_unit34_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit34_bn3 = self.__batch_normalization(2, 'stage3_unit34_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit34_conv3 = self.__conv(2, name='stage3_unit34_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit35_bn1 = self.__batch_normalization(2, 'stage3_unit35_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit35_conv1 = self.__conv(2, name='stage3_unit35_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit35_bn2 = self.__batch_normalization(2, 'stage3_unit35_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit35_conv2 = self.__conv(2, name='stage3_unit35_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit35_bn3 = self.__batch_normalization(2, 'stage3_unit35_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit35_conv3 = self.__conv(2, name='stage3_unit35_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit36_bn1 = self.__batch_normalization(2, 'stage3_unit36_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit36_conv1 = self.__conv(2, name='stage3_unit36_conv1', in_channels=1024, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage3_unit36_bn2 = self.__batch_normalization(2, 'stage3_unit36_bn2', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit36_conv2 = self.__conv(2, name='stage3_unit36_conv2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage3_unit36_bn3 = self.__batch_normalization(2, 'stage3_unit36_bn3', num_features=256, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage3_unit36_conv3 = self.__conv(2, name='stage3_unit36_conv3', in_channels=256, out_channels=1024, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage4_unit1_bn1 = self.__batch_normalization(2, 'stage4_unit1_bn1', num_features=1024, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit1_conv1 = self.__conv(2, name='stage4_unit1_conv1', in_channels=1024, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage4_unit1_sc = self.__conv(2, name='stage4_unit1_sc', in_channels=1024, out_channels=2048, kernel_size=(1, 1), stride=(2, 2), groups=1, bias=False)
self.stage4_unit1_bn2 = self.__batch_normalization(2, 'stage4_unit1_bn2', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit1_conv2 = self.__conv(2, name='stage4_unit1_conv2', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=False)
self.stage4_unit1_bn3 = self.__batch_normalization(2, 'stage4_unit1_bn3', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit1_conv3 = self.__conv(2, name='stage4_unit1_conv3', in_channels=512, out_channels=2048, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage4_unit2_bn1 = self.__batch_normalization(2, 'stage4_unit2_bn1', num_features=2048, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit2_conv1 = self.__conv(2, name='stage4_unit2_conv1', in_channels=2048, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage4_unit2_bn2 = self.__batch_normalization(2, 'stage4_unit2_bn2', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit2_conv2 = self.__conv(2, name='stage4_unit2_conv2', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage4_unit2_bn3 = self.__batch_normalization(2, 'stage4_unit2_bn3', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit2_conv3 = self.__conv(2, name='stage4_unit2_conv3', in_channels=512, out_channels=2048, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage4_unit3_bn1 = self.__batch_normalization(2, 'stage4_unit3_bn1', num_features=2048, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit3_conv1 = self.__conv(2, name='stage4_unit3_conv1', in_channels=2048, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.stage4_unit3_bn2 = self.__batch_normalization(2, 'stage4_unit3_bn2', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit3_conv2 = self.__conv(2, name='stage4_unit3_conv2', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
self.stage4_unit3_bn3 = self.__batch_normalization(2, 'stage4_unit3_bn3', num_features=512, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.stage4_unit3_conv3 = self.__conv(2, name='stage4_unit3_conv3', in_channels=512, out_channels=2048, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.bn1 = self.__batch_normalization(2, 'bn1', num_features=2048, eps=1.9999999494757503e-05, momentum=0.8999999761581421)
self.fc1 = self.__dense(name='fc1', in_features=2048, out_features=11221, bias=True)
def forward(self, x):
bn_data = self.bn_data(x)
conv0_pad = F.pad(bn_data, (3, 3, 3, 3))
conv0 = self.conv0(conv0_pad)
bn0 = self.bn0(conv0)
relu0 = F.relu(bn0)
pooling0_pad = F.pad(relu0, (1, 1, 1, 1), value=float('-inf'))
pooling0 = F.max_pool2d(pooling0_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
stage1_unit1_bn1 = self.stage1_unit1_bn1(pooling0)
stage1_unit1_relu1 = F.relu(stage1_unit1_bn1)
stage1_unit1_conv1 = self.stage1_unit1_conv1(stage1_unit1_relu1)
stage1_unit1_sc = self.stage1_unit1_sc(stage1_unit1_relu1)
stage1_unit1_bn2 = self.stage1_unit1_bn2(stage1_unit1_conv1)
stage1_unit1_relu2 = F.relu(stage1_unit1_bn2)
stage1_unit1_conv2_pad = F.pad(stage1_unit1_relu2, (1, 1, 1, 1))
stage1_unit1_conv2 = self.stage1_unit1_conv2(stage1_unit1_conv2_pad)
stage1_unit1_bn3 = self.stage1_unit1_bn3(stage1_unit1_conv2)
stage1_unit1_relu3 = F.relu(stage1_unit1_bn3)
stage1_unit1_conv3 = self.stage1_unit1_conv3(stage1_unit1_relu3)
plus0 = (stage1_unit1_conv3 + stage1_unit1_sc)
stage1_unit2_bn1 = self.stage1_unit2_bn1(plus0)
stage1_unit2_relu1 = F.relu(stage1_unit2_bn1)
stage1_unit2_conv1 = self.stage1_unit2_conv1(stage1_unit2_relu1)
stage1_unit2_bn2 = self.stage1_unit2_bn2(stage1_unit2_conv1)
stage1_unit2_relu2 = F.relu(stage1_unit2_bn2)
stage1_unit2_conv2_pad = F.pad(stage1_unit2_relu2, (1, 1, 1, 1))
stage1_unit2_conv2 = self.stage1_unit2_conv2(stage1_unit2_conv2_pad)
stage1_unit2_bn3 = self.stage1_unit2_bn3(stage1_unit2_conv2)
stage1_unit2_relu3 = F.relu(stage1_unit2_bn3)
stage1_unit2_conv3 = self.stage1_unit2_conv3(stage1_unit2_relu3)
plus1 = (stage1_unit2_conv3 + plus0)
stage1_unit3_bn1 = self.stage1_unit3_bn1(plus1)
stage1_unit3_relu1 = F.relu(stage1_unit3_bn1)
stage1_unit3_conv1 = self.stage1_unit3_conv1(stage1_unit3_relu1)
stage1_unit3_bn2 = self.stage1_unit3_bn2(stage1_unit3_conv1)
stage1_unit3_relu2 = F.relu(stage1_unit3_bn2)
stage1_unit3_conv2_pad = F.pad(stage1_unit3_relu2, (1, 1, 1, 1))
stage1_unit3_conv2 = self.stage1_unit3_conv2(stage1_unit3_conv2_pad)
stage1_unit3_bn3 = self.stage1_unit3_bn3(stage1_unit3_conv2)
stage1_unit3_relu3 = F.relu(stage1_unit3_bn3)
stage1_unit3_conv3 = self.stage1_unit3_conv3(stage1_unit3_relu3)
plus2 = (stage1_unit3_conv3 + plus1)
stage2_unit1_bn1 = self.stage2_unit1_bn1(plus2)
stage2_unit1_relu1 = F.relu(stage2_unit1_bn1)
stage2_unit1_conv1 = self.stage2_unit1_conv1(stage2_unit1_relu1)
stage2_unit1_sc = self.stage2_unit1_sc(stage2_unit1_relu1)
stage2_unit1_bn2 = self.stage2_unit1_bn2(stage2_unit1_conv1)
stage2_unit1_relu2 = F.relu(stage2_unit1_bn2)
stage2_unit1_conv2_pad = F.pad(stage2_unit1_relu2, (1, 1, 1, 1))
stage2_unit1_conv2 = self.stage2_unit1_conv2(stage2_unit1_conv2_pad)
stage2_unit1_bn3 = self.stage2_unit1_bn3(stage2_unit1_conv2)
stage2_unit1_relu3 = F.relu(stage2_unit1_bn3)
stage2_unit1_conv3 = self.stage2_unit1_conv3(stage2_unit1_relu3)
plus3 = (stage2_unit1_conv3 + stage2_unit1_sc)
stage2_unit2_bn1 = self.stage2_unit2_bn1(plus3)
stage2_unit2_relu1 = F.relu(stage2_unit2_bn1)
stage2_unit2_conv1 = self.stage2_unit2_conv1(stage2_unit2_relu1)
stage2_unit2_bn2 = self.stage2_unit2_bn2(stage2_unit2_conv1)
stage2_unit2_relu2 = F.relu(stage2_unit2_bn2)
stage2_unit2_conv2_pad = F.pad(stage2_unit2_relu2, (1, 1, 1, 1))
stage2_unit2_conv2 = self.stage2_unit2_conv2(stage2_unit2_conv2_pad)
stage2_unit2_bn3 = self.stage2_unit2_bn3(stage2_unit2_conv2)
stage2_unit2_relu3 = F.relu(stage2_unit2_bn3)
stage2_unit2_conv3 = self.stage2_unit2_conv3(stage2_unit2_relu3)
plus4 = (stage2_unit2_conv3 + plus3)
stage2_unit3_bn1 = self.stage2_unit3_bn1(plus4)
stage2_unit3_relu1 = F.relu(stage2_unit3_bn1)
stage2_unit3_conv1 = self.stage2_unit3_conv1(stage2_unit3_relu1)
stage2_unit3_bn2 = self.stage2_unit3_bn2(stage2_unit3_conv1)
stage2_unit3_relu2 = F.relu(stage2_unit3_bn2)
stage2_unit3_conv2_pad = F.pad(stage2_unit3_relu2, (1, 1, 1, 1))
stage2_unit3_conv2 = self.stage2_unit3_conv2(stage2_unit3_conv2_pad)
stage2_unit3_bn3 = self.stage2_unit3_bn3(stage2_unit3_conv2)
stage2_unit3_relu3 = F.relu(stage2_unit3_bn3)
stage2_unit3_conv3 = self.stage2_unit3_conv3(stage2_unit3_relu3)
plus5 = (stage2_unit3_conv3 + plus4)
stage2_unit4_bn1 = self.stage2_unit4_bn1(plus5)
stage2_unit4_relu1 = F.relu(stage2_unit4_bn1)
stage2_unit4_conv1 = self.stage2_unit4_conv1(stage2_unit4_relu1)
stage2_unit4_bn2 = self.stage2_unit4_bn2(stage2_unit4_conv1)
stage2_unit4_relu2 = F.relu(stage2_unit4_bn2)
stage2_unit4_conv2_pad = F.pad(stage2_unit4_relu2, (1, 1, 1, 1))
stage2_unit4_conv2 = self.stage2_unit4_conv2(stage2_unit4_conv2_pad)
stage2_unit4_bn3 = self.stage2_unit4_bn3(stage2_unit4_conv2)
stage2_unit4_relu3 = F.relu(stage2_unit4_bn3)
stage2_unit4_conv3 = self.stage2_unit4_conv3(stage2_unit4_relu3)
plus6 = (stage2_unit4_conv3 + plus5)
stage2_unit5_bn1 = self.stage2_unit5_bn1(plus6)
stage2_unit5_relu1 = F.relu(stage2_unit5_bn1)
stage2_unit5_conv1 = self.stage2_unit5_conv1(stage2_unit5_relu1)
stage2_unit5_bn2 = self.stage2_unit5_bn2(stage2_unit5_conv1)
stage2_unit5_relu2 = F.relu(stage2_unit5_bn2)
stage2_unit5_conv2_pad = F.pad(stage2_unit5_relu2, (1, 1, 1, 1))
stage2_unit5_conv2 = self.stage2_unit5_conv2(stage2_unit5_conv2_pad)
stage2_unit5_bn3 = self.stage2_unit5_bn3(stage2_unit5_conv2)
stage2_unit5_relu3 = F.relu(stage2_unit5_bn3)
stage2_unit5_conv3 = self.stage2_unit5_conv3(stage2_unit5_relu3)
plus7 = (stage2_unit5_conv3 + plus6)
stage2_unit6_bn1 = self.stage2_unit6_bn1(plus7)
stage2_unit6_relu1 = F.relu(stage2_unit6_bn1)
stage2_unit6_conv1 = self.stage2_unit6_conv1(stage2_unit6_relu1)
stage2_unit6_bn2 = self.stage2_unit6_bn2(stage2_unit6_conv1)
stage2_unit6_relu2 = F.relu(stage2_unit6_bn2)
stage2_unit6_conv2_pad = F.pad(stage2_unit6_relu2, (1, 1, 1, 1))
stage2_unit6_conv2 = self.stage2_unit6_conv2(stage2_unit6_conv2_pad)
stage2_unit6_bn3 = self.stage2_unit6_bn3(stage2_unit6_conv2)
stage2_unit6_relu3 = F.relu(stage2_unit6_bn3)
stage2_unit6_conv3 = self.stage2_unit6_conv3(stage2_unit6_relu3)
plus8 = (stage2_unit6_conv3 + plus7)
stage2_unit7_bn1 = self.stage2_unit7_bn1(plus8)
stage2_unit7_relu1 = F.relu(stage2_unit7_bn1)
stage2_unit7_conv1 = self.stage2_unit7_conv1(stage2_unit7_relu1)
stage2_unit7_bn2 = self.stage2_unit7_bn2(stage2_unit7_conv1)
stage2_unit7_relu2 = F.relu(stage2_unit7_bn2)
stage2_unit7_conv2_pad = F.pad(stage2_unit7_relu2, (1, 1, 1, 1))
stage2_unit7_conv2 = self.stage2_unit7_conv2(stage2_unit7_conv2_pad)
stage2_unit7_bn3 = self.stage2_unit7_bn3(stage2_unit7_conv2)
stage2_unit7_relu3 = F.relu(stage2_unit7_bn3)
stage2_unit7_conv3 = self.stage2_unit7_conv3(stage2_unit7_relu3)
plus9 = (stage2_unit7_conv3 + plus8)
stage2_unit8_bn1 = self.stage2_unit8_bn1(plus9)
stage2_unit8_relu1 = F.relu(stage2_unit8_bn1)
stage2_unit8_conv1 = self.stage2_unit8_conv1(stage2_unit8_relu1)
stage2_unit8_bn2 = self.stage2_unit8_bn2(stage2_unit8_conv1)
stage2_unit8_relu2 = F.relu(stage2_unit8_bn2)
stage2_unit8_conv2_pad = F.pad(stage2_unit8_relu2, (1, 1, 1, 1))
stage2_unit8_conv2 = self.stage2_unit8_conv2(stage2_unit8_conv2_pad)
stage2_unit8_bn3 = self.stage2_unit8_bn3(stage2_unit8_conv2)
stage2_unit8_relu3 = F.relu(stage2_unit8_bn3)
stage2_unit8_conv3 = self.stage2_unit8_conv3(stage2_unit8_relu3)
plus10 = (stage2_unit8_conv3 + plus9)
stage3_unit1_bn1 = self.stage3_unit1_bn1(plus10)
stage3_unit1_relu1 = F.relu(stage3_unit1_bn1)
stage3_unit1_conv1 = self.stage3_unit1_conv1(stage3_unit1_relu1)
stage3_unit1_sc = self.stage3_unit1_sc(stage3_unit1_relu1)
stage3_unit1_bn2 = self.stage3_unit1_bn2(stage3_unit1_conv1)
stage3_unit1_relu2 = F.relu(stage3_unit1_bn2)
stage3_unit1_conv2_pad = F.pad(stage3_unit1_relu2, (1, 1, 1, 1))
stage3_unit1_conv2 = self.stage3_unit1_conv2(stage3_unit1_conv2_pad)
stage3_unit1_bn3 = self.stage3_unit1_bn3(stage3_unit1_conv2)
stage3_unit1_relu3 = F.relu(stage3_unit1_bn3)
stage3_unit1_conv3 = self.stage3_unit1_conv3(stage3_unit1_relu3)
plus11 = (stage3_unit1_conv3 + stage3_unit1_sc)
stage3_unit2_bn1 = self.stage3_unit2_bn1(plus11)
stage3_unit2_relu1 = F.relu(stage3_unit2_bn1)
stage3_unit2_conv1 = self.stage3_unit2_conv1(stage3_unit2_relu1)
stage3_unit2_bn2 = self.stage3_unit2_bn2(stage3_unit2_conv1)
stage3_unit2_relu2 = F.relu(stage3_unit2_bn2)
stage3_unit2_conv2_pad = F.pad(stage3_unit2_relu2, (1, 1, 1, 1))
stage3_unit2_conv2 = self.stage3_unit2_conv2(stage3_unit2_conv2_pad)
stage3_unit2_bn3 = self.stage3_unit2_bn3(stage3_unit2_conv2)
stage3_unit2_relu3 = F.relu(stage3_unit2_bn3)
stage3_unit2_conv3 = self.stage3_unit2_conv3(stage3_unit2_relu3)
plus12 = (stage3_unit2_conv3 + plus11)
stage3_unit3_bn1 = self.stage3_unit3_bn1(plus12)
stage3_unit3_relu1 = F.relu(stage3_unit3_bn1)
stage3_unit3_conv1 = self.stage3_unit3_conv1(stage3_unit3_relu1)
stage3_unit3_bn2 = self.stage3_unit3_bn2(stage3_unit3_conv1)
stage3_unit3_relu2 = F.relu(stage3_unit3_bn2)
stage3_unit3_conv2_pad = F.pad(stage3_unit3_relu2, (1, 1, 1, 1))
stage3_unit3_conv2 = self.stage3_unit3_conv2(stage3_unit3_conv2_pad)
stage3_unit3_bn3 = self.stage3_unit3_bn3(stage3_unit3_conv2)
stage3_unit3_relu3 = F.relu(stage3_unit3_bn3)
stage3_unit3_conv3 = self.stage3_unit3_conv3(stage3_unit3_relu3)
plus13 = (stage3_unit3_conv3 + plus12)
stage3_unit4_bn1 = self.stage3_unit4_bn1(plus13)
stage3_unit4_relu1 = F.relu(stage3_unit4_bn1)
stage3_unit4_conv1 = self.stage3_unit4_conv1(stage3_unit4_relu1)
stage3_unit4_bn2 = self.stage3_unit4_bn2(stage3_unit4_conv1)
stage3_unit4_relu2 = F.relu(stage3_unit4_bn2)
stage3_unit4_conv2_pad = F.pad(stage3_unit4_relu2, (1, 1, 1, 1))
stage3_unit4_conv2 = self.stage3_unit4_conv2(stage3_unit4_conv2_pad)
stage3_unit4_bn3 = self.stage3_unit4_bn3(stage3_unit4_conv2)
stage3_unit4_relu3 = F.relu(stage3_unit4_bn3)
stage3_unit4_conv3 = self.stage3_unit4_conv3(stage3_unit4_relu3)
plus14 = (stage3_unit4_conv3 + plus13)
stage3_unit5_bn1 = self.stage3_unit5_bn1(plus14)
stage3_unit5_relu1 = F.relu(stage3_unit5_bn1)
stage3_unit5_conv1 = self.stage3_unit5_conv1(stage3_unit5_relu1)
stage3_unit5_bn2 = self.stage3_unit5_bn2(stage3_unit5_conv1)
stage3_unit5_relu2 = F.relu(stage3_unit5_bn2)
stage3_unit5_conv2_pad = F.pad(stage3_unit5_relu2, (1, 1, 1, 1))
stage3_unit5_conv2 = self.stage3_unit5_conv2(stage3_unit5_conv2_pad)
stage3_unit5_bn3 = self.stage3_unit5_bn3(stage3_unit5_conv2)
stage3_unit5_relu3 = F.relu(stage3_unit5_bn3)
stage3_unit5_conv3 = self.stage3_unit5_conv3(stage3_unit5_relu3)
plus15 = (stage3_unit5_conv3 + plus14)
stage3_unit6_bn1 = self.stage3_unit6_bn1(plus15)
stage3_unit6_relu1 = F.relu(stage3_unit6_bn1)
stage3_unit6_conv1 = self.stage3_unit6_conv1(stage3_unit6_relu1)
stage3_unit6_bn2 = self.stage3_unit6_bn2(stage3_unit6_conv1)
stage3_unit6_relu2 = F.relu(stage3_unit6_bn2)
stage3_unit6_conv2_pad = F.pad(stage3_unit6_relu2, (1, 1, 1, 1))
stage3_unit6_conv2 = self.stage3_unit6_conv2(stage3_unit6_conv2_pad)
stage3_unit6_bn3 = self.stage3_unit6_bn3(stage3_unit6_conv2)
stage3_unit6_relu3 = F.relu(stage3_unit6_bn3)
stage3_unit6_conv3 = self.stage3_unit6_conv3(stage3_unit6_relu3)
plus16 = (stage3_unit6_conv3 + plus15)
stage3_unit7_bn1 = self.stage3_unit7_bn1(plus16)
stage3_unit7_relu1 = F.relu(stage3_unit7_bn1)
stage3_unit7_conv1 = self.stage3_unit7_conv1(stage3_unit7_relu1)
stage3_unit7_bn2 = self.stage3_unit7_bn2(stage3_unit7_conv1)
stage3_unit7_relu2 = F.relu(stage3_unit7_bn2)
stage3_unit7_conv2_pad = F.pad(stage3_unit7_relu2, (1, 1, 1, 1))
stage3_unit7_conv2 = self.stage3_unit7_conv2(stage3_unit7_conv2_pad)
stage3_unit7_bn3 = self.stage3_unit7_bn3(stage3_unit7_conv2)
stage3_unit7_relu3 = F.relu(stage3_unit7_bn3)
stage3_unit7_conv3 = self.stage3_unit7_conv3(stage3_unit7_relu3)
plus17 = (stage3_unit7_conv3 + plus16)
stage3_unit8_bn1 = self.stage3_unit8_bn1(plus17)
stage3_unit8_relu1 = F.relu(stage3_unit8_bn1)
stage3_unit8_conv1 = self.stage3_unit8_conv1(stage3_unit8_relu1)
stage3_unit8_bn2 = self.stage3_unit8_bn2(stage3_unit8_conv1)
stage3_unit8_relu2 = F.relu(stage3_unit8_bn2)
stage3_unit8_conv2_pad = F.pad(stage3_unit8_relu2, (1, 1, 1, 1))
stage3_unit8_conv2 = self.stage3_unit8_conv2(stage3_unit8_conv2_pad)
stage3_unit8_bn3 = self.stage3_unit8_bn3(stage3_unit8_conv2)
stage3_unit8_relu3 = F.relu(stage3_unit8_bn3)
stage3_unit8_conv3 = self.stage3_unit8_conv3(stage3_unit8_relu3)
plus18 = (stage3_unit8_conv3 + plus17)
stage3_unit9_bn1 = self.stage3_unit9_bn1(plus18)
stage3_unit9_relu1 = F.relu(stage3_unit9_bn1)
stage3_unit9_conv1 = self.stage3_unit9_conv1(stage3_unit9_relu1)
stage3_unit9_bn2 = self.stage3_unit9_bn2(stage3_unit9_conv1)
stage3_unit9_relu2 = F.relu(stage3_unit9_bn2)
stage3_unit9_conv2_pad = F.pad(stage3_unit9_relu2, (1, 1, 1, 1))
stage3_unit9_conv2 = self.stage3_unit9_conv2(stage3_unit9_conv2_pad)
stage3_unit9_bn3 = self.stage3_unit9_bn3(stage3_unit9_conv2)
stage3_unit9_relu3 = F.relu(stage3_unit9_bn3)
stage3_unit9_conv3 = self.stage3_unit9_conv3(stage3_unit9_relu3)
plus19 = (stage3_unit9_conv3 + plus18)
stage3_unit10_bn1 = self.stage3_unit10_bn1(plus19)
stage3_unit10_relu1 = F.relu(stage3_unit10_bn1)
stage3_unit10_conv1 = self.stage3_unit10_conv1(stage3_unit10_relu1)
stage3_unit10_bn2 = self.stage3_unit10_bn2(stage3_unit10_conv1)
stage3_unit10_relu2 = F.relu(stage3_unit10_bn2)
stage3_unit10_conv2_pad = F.pad(stage3_unit10_relu2, (1, 1, 1, 1))
stage3_unit10_conv2 = self.stage3_unit10_conv2(stage3_unit10_conv2_pad)
stage3_unit10_bn3 = self.stage3_unit10_bn3(stage3_unit10_conv2)
stage3_unit10_relu3 = F.relu(stage3_unit10_bn3)
stage3_unit10_conv3 = self.stage3_unit10_conv3(stage3_unit10_relu3)
plus20 = (stage3_unit10_conv3 + plus19)
stage3_unit11_bn1 = self.stage3_unit11_bn1(plus20)
stage3_unit11_relu1 = F.relu(stage3_unit11_bn1)
stage3_unit11_conv1 = self.stage3_unit11_conv1(stage3_unit11_relu1)
stage3_unit11_bn2 = self.stage3_unit11_bn2(stage3_unit11_conv1)
stage3_unit11_relu2 = F.relu(stage3_unit11_bn2)
stage3_unit11_conv2_pad = F.pad(stage3_unit11_relu2, (1, 1, 1, 1))
stage3_unit11_conv2 = self.stage3_unit11_conv2(stage3_unit11_conv2_pad)
stage3_unit11_bn3 = self.stage3_unit11_bn3(stage3_unit11_conv2)
stage3_unit11_relu3 = F.relu(stage3_unit11_bn3)
stage3_unit11_conv3 = self.stage3_unit11_conv3(stage3_unit11_relu3)
plus21 = (stage3_unit11_conv3 + plus20)
stage3_unit12_bn1 = self.stage3_unit12_bn1(plus21)
stage3_unit12_relu1 = F.relu(stage3_unit12_bn1)
stage3_unit12_conv1 = self.stage3_unit12_conv1(stage3_unit12_relu1)
stage3_unit12_bn2 = self.stage3_unit12_bn2(stage3_unit12_conv1)
stage3_unit12_relu2 = F.relu(stage3_unit12_bn2)
stage3_unit12_conv2_pad = F.pad(stage3_unit12_relu2, (1, 1, 1, 1))
stage3_unit12_conv2 = self.stage3_unit12_conv2(stage3_unit12_conv2_pad)
stage3_unit12_bn3 = self.stage3_unit12_bn3(stage3_unit12_conv2)
stage3_unit12_relu3 = F.relu(stage3_unit12_bn3)
stage3_unit12_conv3 = self.stage3_unit12_conv3(stage3_unit12_relu3)
plus22 = (stage3_unit12_conv3 + plus21)
stage3_unit13_bn1 = self.stage3_unit13_bn1(plus22)
stage3_unit13_relu1 = F.relu(stage3_unit13_bn1)
stage3_unit13_conv1 = self.stage3_unit13_conv1(stage3_unit13_relu1)
stage3_unit13_bn2 = self.stage3_unit13_bn2(stage3_unit13_conv1)
stage3_unit13_relu2 = F.relu(stage3_unit13_bn2)
stage3_unit13_conv2_pad = F.pad(stage3_unit13_relu2, (1, 1, 1, 1))
stage3_unit13_conv2 = self.stage3_unit13_conv2(stage3_unit13_conv2_pad)
stage3_unit13_bn3 = self.stage3_unit13_bn3(stage3_unit13_conv2)
stage3_unit13_relu3 = F.relu(stage3_unit13_bn3)
stage3_unit13_conv3 = self.stage3_unit13_conv3(stage3_unit13_relu3)
plus23 = (stage3_unit13_conv3 + plus22)
stage3_unit14_bn1 = self.stage3_unit14_bn1(plus23)
stage3_unit14_relu1 = F.relu(stage3_unit14_bn1)
stage3_unit14_conv1 = self.stage3_unit14_conv1(stage3_unit14_relu1)
stage3_unit14_bn2 = self.stage3_unit14_bn2(stage3_unit14_conv1)
stage3_unit14_relu2 = F.relu(stage3_unit14_bn2)
stage3_unit14_conv2_pad = F.pad(stage3_unit14_relu2, (1, 1, 1, 1))
stage3_unit14_conv2 = self.stage3_unit14_conv2(stage3_unit14_conv2_pad)
stage3_unit14_bn3 = self.stage3_unit14_bn3(stage3_unit14_conv2)
stage3_unit14_relu3 = F.relu(stage3_unit14_bn3)
stage3_unit14_conv3 = self.stage3_unit14_conv3(stage3_unit14_relu3)
plus24 = (stage3_unit14_conv3 + plus23)
stage3_unit15_bn1 = self.stage3_unit15_bn1(plus24)
stage3_unit15_relu1 = F.relu(stage3_unit15_bn1)
stage3_unit15_conv1 = self.stage3_unit15_conv1(stage3_unit15_relu1)
stage3_unit15_bn2 = self.stage3_unit15_bn2(stage3_unit15_conv1)
stage3_unit15_relu2 = F.relu(stage3_unit15_bn2)
stage3_unit15_conv2_pad = F.pad(stage3_unit15_relu2, (1, 1, 1, 1))
stage3_unit15_conv2 = self.stage3_unit15_conv2(stage3_unit15_conv2_pad)
stage3_unit15_bn3 = self.stage3_unit15_bn3(stage3_unit15_conv2)
stage3_unit15_relu3 = F.relu(stage3_unit15_bn3)
stage3_unit15_conv3 = self.stage3_unit15_conv3(stage3_unit15_relu3)
plus25 = (stage3_unit15_conv3 + plus24)
stage3_unit16_bn1 = self.stage3_unit16_bn1(plus25)
stage3_unit16_relu1 = F.relu(stage3_unit16_bn1)
stage3_unit16_conv1 = self.stage3_unit16_conv1(stage3_unit16_relu1)
stage3_unit16_bn2 = self.stage3_unit16_bn2(stage3_unit16_conv1)
stage3_unit16_relu2 = F.relu(stage3_unit16_bn2)
stage3_unit16_conv2_pad = F.pad(stage3_unit16_relu2, (1, 1, 1, 1))
stage3_unit16_conv2 = self.stage3_unit16_conv2(stage3_unit16_conv2_pad)
stage3_unit16_bn3 = self.stage3_unit16_bn3(stage3_unit16_conv2)
stage3_unit16_relu3 = F.relu(stage3_unit16_bn3)
stage3_unit16_conv3 = self.stage3_unit16_conv3(stage3_unit16_relu3)
plus26 = (stage3_unit16_conv3 + plus25)
stage3_unit17_bn1 = self.stage3_unit17_bn1(plus26)
stage3_unit17_relu1 = F.relu(stage3_unit17_bn1)
stage3_unit17_conv1 = self.stage3_unit17_conv1(stage3_unit17_relu1)
stage3_unit17_bn2 = self.stage3_unit17_bn2(stage3_unit17_conv1)
stage3_unit17_relu2 = F.relu(stage3_unit17_bn2)
stage3_unit17_conv2_pad = F.pad(stage3_unit17_relu2, (1, 1, 1, 1))
stage3_unit17_conv2 = self.stage3_unit17_conv2(stage3_unit17_conv2_pad)
stage3_unit17_bn3 = self.stage3_unit17_bn3(stage3_unit17_conv2)
stage3_unit17_relu3 = F.relu(stage3_unit17_bn3)
stage3_unit17_conv3 = self.stage3_unit17_conv3(stage3_unit17_relu3)
plus27 = (stage3_unit17_conv3 + plus26)
stage3_unit18_bn1 = self.stage3_unit18_bn1(plus27)
stage3_unit18_relu1 = F.relu(stage3_unit18_bn1)
stage3_unit18_conv1 = self.stage3_unit18_conv1(stage3_unit18_relu1)
stage3_unit18_bn2 = self.stage3_unit18_bn2(stage3_unit18_conv1)
stage3_unit18_relu2 = F.relu(stage3_unit18_bn2)
stage3_unit18_conv2_pad = F.pad(stage3_unit18_relu2, (1, 1, 1, 1))
stage3_unit18_conv2 = self.stage3_unit18_conv2(stage3_unit18_conv2_pad)
stage3_unit18_bn3 = self.stage3_unit18_bn3(stage3_unit18_conv2)
stage3_unit18_relu3 = F.relu(stage3_unit18_bn3)
stage3_unit18_conv3 = self.stage3_unit18_conv3(stage3_unit18_relu3)
plus28 = (stage3_unit18_conv3 + plus27)
stage3_unit19_bn1 = self.stage3_unit19_bn1(plus28)
stage3_unit19_relu1 = F.relu(stage3_unit19_bn1)
stage3_unit19_conv1 = self.stage3_unit19_conv1(stage3_unit19_relu1)
stage3_unit19_bn2 = self.stage3_unit19_bn2(stage3_unit19_conv1)
stage3_unit19_relu2 = F.relu(stage3_unit19_bn2)
stage3_unit19_conv2_pad = F.pad(stage3_unit19_relu2, (1, 1, 1, 1))
stage3_unit19_conv2 = self.stage3_unit19_conv2(stage3_unit19_conv2_pad)
stage3_unit19_bn3 = self.stage3_unit19_bn3(stage3_unit19_conv2)
stage3_unit19_relu3 = F.relu(stage3_unit19_bn3)
stage3_unit19_conv3 = self.stage3_unit19_conv3(stage3_unit19_relu3)
plus29 = (stage3_unit19_conv3 + plus28)
stage3_unit20_bn1 = self.stage3_unit20_bn1(plus29)
stage3_unit20_relu1 = F.relu(stage3_unit20_bn1)
stage3_unit20_conv1 = self.stage3_unit20_conv1(stage3_unit20_relu1)
stage3_unit20_bn2 = self.stage3_unit20_bn2(stage3_unit20_conv1)
stage3_unit20_relu2 = F.relu(stage3_unit20_bn2)
stage3_unit20_conv2_pad = F.pad(stage3_unit20_relu2, (1, 1, 1, 1))
stage3_unit20_conv2 = self.stage3_unit20_conv2(stage3_unit20_conv2_pad)
stage3_unit20_bn3 = self.stage3_unit20_bn3(stage3_unit20_conv2)
stage3_unit20_relu3 = F.relu(stage3_unit20_bn3)
stage3_unit20_conv3 = self.stage3_unit20_conv3(stage3_unit20_relu3)
plus30 = (stage3_unit20_conv3 + plus29)
stage3_unit21_bn1 = self.stage3_unit21_bn1(plus30)
stage3_unit21_relu1 = F.relu(stage3_unit21_bn1)
stage3_unit21_conv1 = self.stage3_unit21_conv1(stage3_unit21_relu1)
stage3_unit21_bn2 = self.stage3_unit21_bn2(stage3_unit21_conv1)
stage3_unit21_relu2 = F.relu(stage3_unit21_bn2)
stage3_unit21_conv2_pad = F.pad(stage3_unit21_relu2, (1, 1, 1, 1))
stage3_unit21_conv2 = self.stage3_unit21_conv2(stage3_unit21_conv2_pad)
stage3_unit21_bn3 = self.stage3_unit21_bn3(stage3_unit21_conv2)
stage3_unit21_relu3 = F.relu(stage3_unit21_bn3)
stage3_unit21_conv3 = self.stage3_unit21_conv3(stage3_unit21_relu3)
plus31 = (stage3_unit21_conv3 + plus30)
stage3_unit22_bn1 = self.stage3_unit22_bn1(plus31)
stage3_unit22_relu1 = F.relu(stage3_unit22_bn1)
stage3_unit22_conv1 = self.stage3_unit22_conv1(stage3_unit22_relu1)
stage3_unit22_bn2 = self.stage3_unit22_bn2(stage3_unit22_conv1)
stage3_unit22_relu2 = F.relu(stage3_unit22_bn2)
stage3_unit22_conv2_pad = F.pad(stage3_unit22_relu2, (1, 1, 1, 1))
stage3_unit22_conv2 = self.stage3_unit22_conv2(stage3_unit22_conv2_pad)
stage3_unit22_bn3 = self.stage3_unit22_bn3(stage3_unit22_conv2)
stage3_unit22_relu3 = F.relu(stage3_unit22_bn3)
stage3_unit22_conv3 = self.stage3_unit22_conv3(stage3_unit22_relu3)
plus32 = (stage3_unit22_conv3 + plus31)
stage3_unit23_bn1 = self.stage3_unit23_bn1(plus32)
stage3_unit23_relu1 = F.relu(stage3_unit23_bn1)
stage3_unit23_conv1 = self.stage3_unit23_conv1(stage3_unit23_relu1)
stage3_unit23_bn2 = self.stage3_unit23_bn2(stage3_unit23_conv1)
stage3_unit23_relu2 = F.relu(stage3_unit23_bn2)
stage3_unit23_conv2_pad = F.pad(stage3_unit23_relu2, (1, 1, 1, 1))
stage3_unit23_conv2 = self.stage3_unit23_conv2(stage3_unit23_conv2_pad)
stage3_unit23_bn3 = self.stage3_unit23_bn3(stage3_unit23_conv2)
stage3_unit23_relu3 = F.relu(stage3_unit23_bn3)
stage3_unit23_conv3 = self.stage3_unit23_conv3(stage3_unit23_relu3)
plus33 = (stage3_unit23_conv3 + plus32)
stage3_unit24_bn1 = self.stage3_unit24_bn1(plus33)
stage3_unit24_relu1 = F.relu(stage3_unit24_bn1)
stage3_unit24_conv1 = self.stage3_unit24_conv1(stage3_unit24_relu1)
stage3_unit24_bn2 = self.stage3_unit24_bn2(stage3_unit24_conv1)
stage3_unit24_relu2 = F.relu(stage3_unit24_bn2)
stage3_unit24_conv2_pad = F.pad(stage3_unit24_relu2, (1, 1, 1, 1))
stage3_unit24_conv2 = self.stage3_unit24_conv2(stage3_unit24_conv2_pad)
stage3_unit24_bn3 = self.stage3_unit24_bn3(stage3_unit24_conv2)
stage3_unit24_relu3 = F.relu(stage3_unit24_bn3)
stage3_unit24_conv3 = self.stage3_unit24_conv3(stage3_unit24_relu3)
plus34 = (stage3_unit24_conv3 + plus33)
stage3_unit25_bn1 = self.stage3_unit25_bn1(plus34)
stage3_unit25_relu1 = F.relu(stage3_unit25_bn1)
stage3_unit25_conv1 = self.stage3_unit25_conv1(stage3_unit25_relu1)
stage3_unit25_bn2 = self.stage3_unit25_bn2(stage3_unit25_conv1)
stage3_unit25_relu2 = F.relu(stage3_unit25_bn2)
stage3_unit25_conv2_pad = F.pad(stage3_unit25_relu2, (1, 1, 1, 1))
stage3_unit25_conv2 = self.stage3_unit25_conv2(stage3_unit25_conv2_pad)
stage3_unit25_bn3 = self.stage3_unit25_bn3(stage3_unit25_conv2)
stage3_unit25_relu3 = F.relu(stage3_unit25_bn3)
stage3_unit25_conv3 = self.stage3_unit25_conv3(stage3_unit25_relu3)
plus35 = (stage3_unit25_conv3 + plus34)
stage3_unit26_bn1 = self.stage3_unit26_bn1(plus35)
stage3_unit26_relu1 = F.relu(stage3_unit26_bn1)
stage3_unit26_conv1 = self.stage3_unit26_conv1(stage3_unit26_relu1)
stage3_unit26_bn2 = self.stage3_unit26_bn2(stage3_unit26_conv1)
stage3_unit26_relu2 = F.relu(stage3_unit26_bn2)
stage3_unit26_conv2_pad = F.pad(stage3_unit26_relu2, (1, 1, 1, 1))
stage3_unit26_conv2 = self.stage3_unit26_conv2(stage3_unit26_conv2_pad)
stage3_unit26_bn3 = self.stage3_unit26_bn3(stage3_unit26_conv2)
stage3_unit26_relu3 = F.relu(stage3_unit26_bn3)
stage3_unit26_conv3 = self.stage3_unit26_conv3(stage3_unit26_relu3)
plus36 = (stage3_unit26_conv3 + plus35)
stage3_unit27_bn1 = self.stage3_unit27_bn1(plus36)
stage3_unit27_relu1 = F.relu(stage3_unit27_bn1)
stage3_unit27_conv1 = self.stage3_unit27_conv1(stage3_unit27_relu1)
stage3_unit27_bn2 = self.stage3_unit27_bn2(stage3_unit27_conv1)
stage3_unit27_relu2 = F.relu(stage3_unit27_bn2)
stage3_unit27_conv2_pad = F.pad(stage3_unit27_relu2, (1, 1, 1, 1))
stage3_unit27_conv2 = self.stage3_unit27_conv2(stage3_unit27_conv2_pad)
stage3_unit27_bn3 = self.stage3_unit27_bn3(stage3_unit27_conv2)
stage3_unit27_relu3 = F.relu(stage3_unit27_bn3)
stage3_unit27_conv3 = self.stage3_unit27_conv3(stage3_unit27_relu3)
plus37 = (stage3_unit27_conv3 + plus36)
stage3_unit28_bn1 = self.stage3_unit28_bn1(plus37)
stage3_unit28_relu1 = F.relu(stage3_unit28_bn1)
stage3_unit28_conv1 = self.stage3_unit28_conv1(stage3_unit28_relu1)
stage3_unit28_bn2 = self.stage3_unit28_bn2(stage3_unit28_conv1)
stage3_unit28_relu2 = F.relu(stage3_unit28_bn2)
stage3_unit28_conv2_pad = F.pad(stage3_unit28_relu2, (1, 1, 1, 1))
stage3_unit28_conv2 = self.stage3_unit28_conv2(stage3_unit28_conv2_pad)
stage3_unit28_bn3 = self.stage3_unit28_bn3(stage3_unit28_conv2)
stage3_unit28_relu3 = F.relu(stage3_unit28_bn3)
stage3_unit28_conv3 = self.stage3_unit28_conv3(stage3_unit28_relu3)
plus38 = (stage3_unit28_conv3 + plus37)
stage3_unit29_bn1 = self.stage3_unit29_bn1(plus38)
stage3_unit29_relu1 = F.relu(stage3_unit29_bn1)
stage3_unit29_conv1 = self.stage3_unit29_conv1(stage3_unit29_relu1)
stage3_unit29_bn2 = self.stage3_unit29_bn2(stage3_unit29_conv1)
stage3_unit29_relu2 = F.relu(stage3_unit29_bn2)
stage3_unit29_conv2_pad = F.pad(stage3_unit29_relu2, (1, 1, 1, 1))
stage3_unit29_conv2 = self.stage3_unit29_conv2(stage3_unit29_conv2_pad)
stage3_unit29_bn3 = self.stage3_unit29_bn3(stage3_unit29_conv2)
stage3_unit29_relu3 = F.relu(stage3_unit29_bn3)
stage3_unit29_conv3 = self.stage3_unit29_conv3(stage3_unit29_relu3)
plus39 = (stage3_unit29_conv3 + plus38)
stage3_unit30_bn1 = self.stage3_unit30_bn1(plus39)
stage3_unit30_relu1 = F.relu(stage3_unit30_bn1)
stage3_unit30_conv1 = self.stage3_unit30_conv1(stage3_unit30_relu1)
stage3_unit30_bn2 = self.stage3_unit30_bn2(stage3_unit30_conv1)
stage3_unit30_relu2 = F.relu(stage3_unit30_bn2)
stage3_unit30_conv2_pad = F.pad(stage3_unit30_relu2, (1, 1, 1, 1))
stage3_unit30_conv2 = self.stage3_unit30_conv2(stage3_unit30_conv2_pad)
stage3_unit30_bn3 = self.stage3_unit30_bn3(stage3_unit30_conv2)
stage3_unit30_relu3 = F.relu(stage3_unit30_bn3)
stage3_unit30_conv3 = self.stage3_unit30_conv3(stage3_unit30_relu3)
plus40 = (stage3_unit30_conv3 + plus39)
stage3_unit31_bn1 = self.stage3_unit31_bn1(plus40)
stage3_unit31_relu1 = F.relu(stage3_unit31_bn1)
stage3_unit31_conv1 = self.stage3_unit31_conv1(stage3_unit31_relu1)
stage3_unit31_bn2 = self.stage3_unit31_bn2(stage3_unit31_conv1)
stage3_unit31_relu2 = F.relu(stage3_unit31_bn2)
stage3_unit31_conv2_pad = F.pad(stage3_unit31_relu2, (1, 1, 1, 1))
stage3_unit31_conv2 = self.stage3_unit31_conv2(stage3_unit31_conv2_pad)
stage3_unit31_bn3 = self.stage3_unit31_bn3(stage3_unit31_conv2)
stage3_unit31_relu3 = F.relu(stage3_unit31_bn3)
stage3_unit31_conv3 = self.stage3_unit31_conv3(stage3_unit31_relu3)
plus41 = (stage3_unit31_conv3 + plus40)
stage3_unit32_bn1 = self.stage3_unit32_bn1(plus41)
stage3_unit32_relu1 = F.relu(stage3_unit32_bn1)
stage3_unit32_conv1 = self.stage3_unit32_conv1(stage3_unit32_relu1)
stage3_unit32_bn2 = self.stage3_unit32_bn2(stage3_unit32_conv1)
stage3_unit32_relu2 = F.relu(stage3_unit32_bn2)
stage3_unit32_conv2_pad = F.pad(stage3_unit32_relu2, (1, 1, 1, 1))
stage3_unit32_conv2 = self.stage3_unit32_conv2(stage3_unit32_conv2_pad)
stage3_unit32_bn3 = self.stage3_unit32_bn3(stage3_unit32_conv2)
stage3_unit32_relu3 = F.relu(stage3_unit32_bn3)
stage3_unit32_conv3 = self.stage3_unit32_conv3(stage3_unit32_relu3)
plus42 = (stage3_unit32_conv3 + plus41)
stage3_unit33_bn1 = self.stage3_unit33_bn1(plus42)
stage3_unit33_relu1 = F.relu(stage3_unit33_bn1)
stage3_unit33_conv1 = self.stage3_unit33_conv1(stage3_unit33_relu1)
stage3_unit33_bn2 = self.stage3_unit33_bn2(stage3_unit33_conv1)
stage3_unit33_relu2 = F.relu(stage3_unit33_bn2)
stage3_unit33_conv2_pad = F.pad(stage3_unit33_relu2, (1, 1, 1, 1))
stage3_unit33_conv2 = self.stage3_unit33_conv2(stage3_unit33_conv2_pad)
stage3_unit33_bn3 = self.stage3_unit33_bn3(stage3_unit33_conv2)
stage3_unit33_relu3 = F.relu(stage3_unit33_bn3)
stage3_unit33_conv3 = self.stage3_unit33_conv3(stage3_unit33_relu3)
plus43 = (stage3_unit33_conv3 + plus42)
stage3_unit34_bn1 = self.stage3_unit34_bn1(plus43)
stage3_unit34_relu1 = F.relu(stage3_unit34_bn1)
stage3_unit34_conv1 = self.stage3_unit34_conv1(stage3_unit34_relu1)
stage3_unit34_bn2 = self.stage3_unit34_bn2(stage3_unit34_conv1)
stage3_unit34_relu2 = F.relu(stage3_unit34_bn2)
stage3_unit34_conv2_pad = F.pad(stage3_unit34_relu2, (1, 1, 1, 1))
stage3_unit34_conv2 = self.stage3_unit34_conv2(stage3_unit34_conv2_pad)
stage3_unit34_bn3 = self.stage3_unit34_bn3(stage3_unit34_conv2)
stage3_unit34_relu3 = F.relu(stage3_unit34_bn3)
stage3_unit34_conv3 = self.stage3_unit34_conv3(stage3_unit34_relu3)
plus44 = (stage3_unit34_conv3 + plus43)
stage3_unit35_bn1 = self.stage3_unit35_bn1(plus44)
stage3_unit35_relu1 = F.relu(stage3_unit35_bn1)
stage3_unit35_conv1 = self.stage3_unit35_conv1(stage3_unit35_relu1)
stage3_unit35_bn2 = self.stage3_unit35_bn2(stage3_unit35_conv1)
stage3_unit35_relu2 = F.relu(stage3_unit35_bn2)
stage3_unit35_conv2_pad = F.pad(stage3_unit35_relu2, (1, 1, 1, 1))
stage3_unit35_conv2 = self.stage3_unit35_conv2(stage3_unit35_conv2_pad)
stage3_unit35_bn3 = self.stage3_unit35_bn3(stage3_unit35_conv2)
stage3_unit35_relu3 = F.relu(stage3_unit35_bn3)
stage3_unit35_conv3 = self.stage3_unit35_conv3(stage3_unit35_relu3)
plus45 = (stage3_unit35_conv3 + plus44)
stage3_unit36_bn1 = self.stage3_unit36_bn1(plus45)
stage3_unit36_relu1 = F.relu(stage3_unit36_bn1)
stage3_unit36_conv1 = self.stage3_unit36_conv1(stage3_unit36_relu1)
stage3_unit36_bn2 = self.stage3_unit36_bn2(stage3_unit36_conv1)
stage3_unit36_relu2 = F.relu(stage3_unit36_bn2)
stage3_unit36_conv2_pad = F.pad(stage3_unit36_relu2, (1, 1, 1, 1))
stage3_unit36_conv2 = self.stage3_unit36_conv2(stage3_unit36_conv2_pad)
stage3_unit36_bn3 = self.stage3_unit36_bn3(stage3_unit36_conv2)
stage3_unit36_relu3 = F.relu(stage3_unit36_bn3)
stage3_unit36_conv3 = self.stage3_unit36_conv3(stage3_unit36_relu3)
plus46 = (stage3_unit36_conv3 + plus45)
stage4_unit1_bn1 = self.stage4_unit1_bn1(plus46)
stage4_unit1_relu1 = F.relu(stage4_unit1_bn1)
stage4_unit1_conv1 = self.stage4_unit1_conv1(stage4_unit1_relu1)
stage4_unit1_sc = self.stage4_unit1_sc(stage4_unit1_relu1)
stage4_unit1_bn2 = self.stage4_unit1_bn2(stage4_unit1_conv1)
stage4_unit1_relu2 = F.relu(stage4_unit1_bn2)
stage4_unit1_conv2_pad = F.pad(stage4_unit1_relu2, (1, 1, 1, 1))
stage4_unit1_conv2 = self.stage4_unit1_conv2(stage4_unit1_conv2_pad)
stage4_unit1_bn3 = self.stage4_unit1_bn3(stage4_unit1_conv2)
stage4_unit1_relu3 = F.relu(stage4_unit1_bn3)
stage4_unit1_conv3 = self.stage4_unit1_conv3(stage4_unit1_relu3)
plus47 = (stage4_unit1_conv3 + stage4_unit1_sc)
stage4_unit2_bn1 = self.stage4_unit2_bn1(plus47)
stage4_unit2_relu1 = F.relu(stage4_unit2_bn1)
stage4_unit2_conv1 = self.stage4_unit2_conv1(stage4_unit2_relu1)
stage4_unit2_bn2 = self.stage4_unit2_bn2(stage4_unit2_conv1)
stage4_unit2_relu2 = F.relu(stage4_unit2_bn2)
stage4_unit2_conv2_pad = F.pad(stage4_unit2_relu2, (1, 1, 1, 1))
stage4_unit2_conv2 = self.stage4_unit2_conv2(stage4_unit2_conv2_pad)
stage4_unit2_bn3 = self.stage4_unit2_bn3(stage4_unit2_conv2)
stage4_unit2_relu3 = F.relu(stage4_unit2_bn3)
stage4_unit2_conv3 = self.stage4_unit2_conv3(stage4_unit2_relu3)
plus48 = (stage4_unit2_conv3 + plus47)
stage4_unit3_bn1 = self.stage4_unit3_bn1(plus48)
stage4_unit3_relu1 = F.relu(stage4_unit3_bn1)
stage4_unit3_conv1 = self.stage4_unit3_conv1(stage4_unit3_relu1)
stage4_unit3_bn2 = self.stage4_unit3_bn2(stage4_unit3_conv1)
stage4_unit3_relu2 = F.relu(stage4_unit3_bn2)
stage4_unit3_conv2_pad = F.pad(stage4_unit3_relu2, (1, 1, 1, 1))
stage4_unit3_conv2 = self.stage4_unit3_conv2(stage4_unit3_conv2_pad)
stage4_unit3_bn3 = self.stage4_unit3_bn3(stage4_unit3_conv2)
stage4_unit3_relu3 = F.relu(stage4_unit3_bn3)
stage4_unit3_conv3 = self.stage4_unit3_conv3(stage4_unit3_relu3)
plus49 = (stage4_unit3_conv3 + plus48)
bn1 = self.bn1(plus49)
relu1 = F.relu(bn1)
pool1 = F.avg_pool2d(input=relu1, kernel_size=relu1.size()[2:])
flatten0 = pool1.view(pool1.size(0), (- 1))
fc1 = self.fc1(flatten0)
softmax = F.softmax(fc1)
return softmax
@staticmethod
def __batch_normalization(dim, name, **kwargs):
    """Build a BatchNorm layer of the given dimensionality and load its
    pretrained statistics from the module-level ``__weights_dict``.

    :param dim: dimensionality selector (0 or 1 -> BatchNorm1d,
        2 -> BatchNorm2d, 3 -> BatchNorm3d).
    :param name: key into ``__weights_dict``; expects numpy arrays under
        'mean' and 'var', and optionally 'scale' and 'bias'.
    :param kwargs: forwarded to the ``nn.BatchNorm*`` constructor.
    :return: the initialized BatchNorm layer.
    :raises NotImplementedError: for any other ``dim``.
    """
    # NOTE(review): a double-underscore name inside a class body is mangled
    # (e.g. to _KitModel__weights_dict) -- confirm the module defines the
    # weights dict under the mangled name, as MMdnn-generated code does.
    if ((dim == 0) or (dim == 1)):
        layer = nn.BatchNorm1d(**kwargs)
    elif (dim == 2):
        layer = nn.BatchNorm2d(**kwargs)
    elif (dim == 3):
        layer = nn.BatchNorm3d(**kwargs)
    else:
        raise NotImplementedError()
    # Copy checkpoint tensors in place via state_dict(); fall back to the
    # identity affine transform (weight=1, bias=0) when the checkpoint
    # omits 'scale'/'bias'.
    if ('scale' in __weights_dict[name]):
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
    else:
        layer.weight.data.fill_(1)
    if ('bias' in __weights_dict[name]):
        layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
    else:
        layer.bias.data.fill_(0)
    # Running statistics are mandatory in the checkpoint.
    layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
    layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
    return layer
@staticmethod
def __conv(dim, name, **kwargs):
    """Build a Conv1d/2d/3d layer and load its weights from the
    module-level ``__weights_dict``.

    :param dim: convolution dimensionality (1, 2 or 3).
    :param name: key into ``__weights_dict``; expects a numpy array under
        'weights' and optionally 'bias'.
    :param kwargs: forwarded to the ``nn.Conv*`` constructor.
    :return: the initialized convolution layer.
    :raises NotImplementedError: for any other ``dim``.
    """
    # NOTE(review): `__weights_dict` is name-mangled inside the class --
    # confirm the module-level definition matches (MMdnn convention).
    if (dim == 1):
        layer = nn.Conv1d(**kwargs)
    elif (dim == 2):
        layer = nn.Conv2d(**kwargs)
    elif (dim == 3):
        layer = nn.Conv3d(**kwargs)
    else:
        raise NotImplementedError()
    # In-place copy of the pretrained kernel; bias is optional.
    layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
    if ('bias' in __weights_dict[name]):
        layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
    return layer
@staticmethod
def __dense(name, **kwargs):
    """Build an ``nn.Linear`` and load 'weights' (and optional 'bias')
    from the module-level ``__weights_dict`` under ``name``.

    :param kwargs: forwarded to the ``nn.Linear`` constructor.
    :return: the initialized linear layer.
    """
    layer = nn.Linear(**kwargs)
    # In-place copy of the pretrained weight matrix; bias is optional.
    layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
    if ('bias' in __weights_dict[name]):
        layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
    return layer
|
def classifier_loader():
    """Load the ResNet-152 (ImageNet-11k) classifier from stored checkpoint bytes."""
    # KitModel is the MMdnn-converted network; it deserializes the raw bytes itself.
    return KitModel(load_model_checkpoint_bytes('resnet152-imagenet11k'))
|
def gen_classifier_loader(name, d):
    """Return a zero-argument loader for a semi-supervised-ImageNet hub model.

    :param name: torch.hub entry-point name; also the key for the stored state dict.
    :param d: model metadata dict (unused here; kept for a uniform factory signature).
    """
    def classifier_loader():
        # Fetch the architecture from the hub, then overwrite its weights with
        # our stored copy so evaluation is reproducible.
        model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', name)
        load_model_state_dict(model, name)
        return model
    return classifier_loader
|
class Smooth(nn.Module):
    """A smoothed classifier g: a base classifier evaluated under Gaussian
    input noise (randomized smoothing)."""

    # Sentinel returned by predict() when the binomial hypothesis test cannot
    # certify a top class at confidence 1 - alpha.
    # (Bug fix: predict() referenced Smooth.ABSTAIN but the attribute was
    # never defined, raising AttributeError on every abstention.)
    ABSTAIN = -1

    def __init__(self, base_classifier, sigma, n, alpha, mean, std):
        """
        :param base_classifier: maps [batch x channel x height x width] to [batch x num_classes]
        :param sigma: the noise level hyperparameter
        :param n: the number of Monte Carlo samples to use
        :param alpha: the failure probability
        :param mean: per-channel input normalization mean (length 3)
        :param std: per-channel input normalization std (length 3)
        """
        super().__init__()
        self.base_classifier = base_classifier
        self.sigma = sigma
        self.n = n
        self.alpha = alpha
        # Stored as (3,1,1) parameters so they broadcast over H x W and move
        # with the module across devices.
        self.mean = nn.Parameter(torch.tensor(mean).float().view(3, 1, 1))
        self.std = nn.Parameter(torch.tensor(std).float().view(3, 1, 1))

    def predict(self, x, batch_size, class_sublist=None):
        """Monte Carlo evaluation of g at x. With probability at least
        1 - alpha the top-count class equals g(x); uses the hypothesis test
        from https://arxiv.org/abs/1610.03944.

        :param x: the input [channel x height x width]
        :param batch_size: batch size when evaluating the base classifier
        :return: the per-class sample counts, or ``Smooth.ABSTAIN`` when the
            two most frequent classes are statistically indistinguishable
        """
        counts = self._sample_noise(x, self.n, batch_size, class_sublist)
        top2 = counts.argsort()[::(- 1)]
        count1 = counts[top2[0]]
        count2 = counts[top2[1]]
        # Two-sided binomial test on the two most frequent classes.
        if (binom_test(count1, (count1 + count2), p=0.5) > self.alpha):
            return Smooth.ABSTAIN
        else:
            return counts

    def _sample_noise(self, x, num, batch_size, class_sublist=None):
        """Sample the base classifier's prediction under noisy corruptions of x.

        :param x: the input [channel x width x height]
        :param num: number of samples to collect
        :return: ndarray[int] of per-class counts over the noisy samples
        """
        with torch.no_grad():
            counts = []
            for _ in range(ceil((num / batch_size))):
                this_batch_size = min(batch_size, num)
                num -= this_batch_size
                batch = x.repeat((this_batch_size, 1, 1, 1))
                # NOTE: noise is created directly on CUDA, so this path
                # requires a GPU.
                noise = (torch.randn_like(batch, device='cuda') * self.sigma)
                logits = self.base_classify((batch + noise))
                if (class_sublist is not None):
                    # Keep only the requested class columns.
                    logits = logits.t()[class_sublist].t()
                predictions = logits.argmax(dim=1).cpu().numpy()
                counts += [self._count_arr(predictions, logits.size(1))]
        return np.array(counts).sum(axis=0)

    def _count_arr(self, arr, length):
        """Histogram of predicted class indices into ``length`` bins."""
        counts = np.zeros(length, dtype=int)
        for idx in arr:
            counts[idx] += 1
        return counts

    def predict_batch(self, x, class_sublist):
        """Smoothed per-class counts for every image in batch x, stacked
        into a float tensor.

        NOTE(review): if predict() abstains it returns the int ABSTAIN,
        which torch.from_numpy cannot wrap -- callers appear to rely on
        abstention being rare; confirm.
        """
        counts = []
        for img in x:
            count = self.predict(img, x.size(0), class_sublist)
            counts += [torch.from_numpy(count)]
        counts = torch.stack(counts, dim=0)
        return counts.float()

    def forward(self, x):
        """Single noisy forward pass, used during adversarial (pgd) attacks.
        Not the main evaluation path; see predict_batch for that."""
        noise = (torch.randn_like(x) * self.sigma)
        return self.base_classify((x + noise))

    def base_classify(self, x):
        """Normalize with the stored mean/std, then run the base classifier."""
        x = ((x - self.mean) / self.std)
        return self.base_classifier(x)
|
def gen_classifier_loader(name, d):
    """Return a loader that wraps a torchvision model in the `Smooth`
    randomized-smoothing classifier using metadata from `d`.

    :param name: key for the stored state dict.
    :param d: dict with 'arch', 'noise_sigma', 'n', 'alpha', 'mean', 'std'.
    """
    def classifier_loader():
        model = torch_models.__dict__[d['arch']]()
        load_model_state_dict(model, name)
        # Normalization stats live inside Smooth, so the base model sees
        # already-normalized inputs.
        model = Smooth(model, d['noise_sigma'], d['n'], d['alpha'], d['mean'], d['std'])
        return model
    return classifier_loader
|
def classify(images, model, class_sublist, adversarial_attack):
    """Classify a batch with the smoothed model, optionally attacking first.

    :param adversarial_attack: attack config; falsy disables the attack.
    :param class_sublist: restricts logits to these class indices.
    :return: per-class Monte Carlo counts from Smooth.predict_batch.
    """
    if adversarial_attack:
        images = pgd_style_attack(adversarial_attack, images, model)
    return model.predict_batch(images, class_sublist=class_sublist)
|
def gen_classifier_loader(name, d):
    """Return a loader for a plain torchvision architecture (d['arch'])
    initialized from the stored state dict for `name`."""
    def classifier_loader():
        model = torch_models.__dict__[d['arch']]()
        load_model_state_dict(model, name)
        return model
    return classifier_loader
|
class TFHider():
    """Lazily imports TensorFlow 1.x compat mode with its log spam silenced.

    The imported module is cached on the class attribute ``tf``, so other
    functions use ``TFHider.tf`` after constructing one instance.
    """
    # Populated by __init__; None until TF has been imported once.
    tf = None

    def __init__(self):
        import os
        # Must be set before the tensorflow import to take effect.
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        import tensorflow.python.util.deprecation as deprecation
        deprecation._PRINT_DEPRECATION_WARNINGS = False
        import tensorflow.compat.v1 as tf
        # Graph/session-based TF1 semantics are required by the callers below.
        tf.disable_v2_behavior()
        TFHider.tf = tf
|
def classifier_loader():
    """Load the tencent-ml-images frozen TF graph, pinned to the GPU that
    torch is currently using.

    :return: a tf.Graph with the model imported under the 'import/' scope.
    """
    TFHider()
    # Restrict TF to torch's current GPU so the two frameworks share one device.
    gpus_list = TFHider.tf.config.experimental.list_physical_devices('GPU')
    TFHider.tf.config.experimental.set_visible_devices(gpus_list[torch.cuda.current_device()], 'GPU')
    # NOTE(review): the path contains a literal '~' (not shell-expanded) --
    # confirm this is the intended mount point.
    with TFHider.tf.gfile.GFile('/data/~/tencent-ml-images/model.pb', 'rb') as f:
        graph_def = TFHider.tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with TFHider.tf.Graph().as_default() as graph:
        TFHider.tf.import_graph_def(graph_def)
    return graph
|
def classify(images, model, adversarial_attack):
    """Run the frozen TF graph on a batch of torch images.

    :param images: torch tensor, NCHW; converted to NHWC numpy for TF.
    :param model: the tf.Graph returned by classifier_loader above.
    :param adversarial_attack: unused for this TF model.
    :return: logits as a CUDA torch tensor.
    """
    images = images.cpu().numpy().transpose(0, 2, 3, 1)
    with TFHider.tf.Session(graph=model) as sess:
        # Tensor names carry the 'import/' prefix added by import_graph_def.
        logits = sess.run('import/logits/output:0', feed_dict={'import/Placeholder:0': images})
    outputs = torch.from_numpy(logits).cuda()
    return outputs
|
def gen_classifier_loader(name, d):
    """Return a loader for a torchvision architecture with stored weights.

    :param name: model key; also selects the GoogLeNet special case.
    :param d: metadata dict with the torchvision architecture under 'arch'.
    """
    def classifier_loader():
        if (name == 'googlenet/inceptionv1'):
            # GoogLeNet needs its aux heads disabled and the legacy input
            # transform to match the stored weights.
            model = torch_models.__dict__[d['arch']](pretrained=False, aux_logits=False, transform_input=True)
        else:
            model = torch_models.__dict__[d['arch']](pretrained=False)
        load_model_state_dict(model, name)
        return model
    return classifier_loader
|
def gen_classifier_loader(name, d):
    """Return a loader for a timm model, forwarding an optional 'qk_scale'
    hyperparameter from the metadata dict `d`."""
    def classifier_loader():
        model = timm.create_model(name, pretrained=False, qk_scale=(d['qk_scale'] if ('qk_scale' in d) else None))
        load_model_state_dict(model, name)
        return model
    return classifier_loader
|
class TFHider():
    """Lazily imports TensorFlow 2.x with its warning/log output suppressed.

    The module is cached on the class attribute ``tf``; constructing one
    instance makes it available as ``TFHider.tf``.
    """
    # Populated by __init__; None until TF has been imported once.
    tf = None

    def __init__(self):
        import os
        # Must be set before the tensorflow import to take effect.
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        import tensorflow.python.util.deprecation as deprecation
        deprecation._PRINT_DEPRECATION_WARNINGS = False
        import tensorflow as tf
        TFHider.tf = tf
|
def gen_classifier_loader(name, d):
    """Return a loader for a VTAB TF SavedModel; the loader yields a callable
    mapping an image batch to the tensor named by d['output_node']."""
    def classifier_loader():
        TFHider()
        # Restrict TF to torch's current GPU so both frameworks share it.
        gpus_list = TFHider.tf.config.experimental.list_physical_devices('GPU')
        TFHider.tf.config.experimental.set_visible_devices(gpus_list[torch.cuda.current_device()], 'GPU')
        # tags=[] loads the SavedModel's untagged (default) MetaGraph.
        # NOTE(review): the path contains a literal '~' -- confirm intended.
        loaded = TFHider.tf.saved_model.load(('/data/~/vtab/' + name), tags=[])
        infer = loaded.signatures['default']
        return (lambda images: infer(images)[d['output_node']])
    return classifier_loader
|
def classify(images, model, adversarial_attack):
    """Run a TF2 SavedModel callable on torch images (NCHW -> NHWC) and
    return the result as a CUDA torch tensor.

    :param adversarial_attack: unused for this TF model.
    """
    images = TFHider.tf.convert_to_tensor(images.cpu().numpy().transpose(0, 2, 3, 1))
    outputs = model(images)
    outputs = torch.from_numpy(outputs.numpy()).cuda()
    return outputs
|
class Registry():
    """Name-keyed registry of models and eval settings for the benchmark."""

    def __init__(self):
        self.models = {}
        self.eval_settings = {}

    def add_model(self, model):
        """Register a model object; its .name must be unique."""
        assert model.name not in self.models, f'Duplicate model {model.name} found. Model names must be unique.'
        self.models[model.name] = model

    def add_eval_setting(self, eval_setting):
        """Register an eval setting object; its .name must be unique."""
        assert eval_setting.name not in self.eval_settings, f'Duplicate eval setting {eval_setting.name} found. Eval setting names must be unique.'
        self.eval_settings[eval_setting.name] = eval_setting

    def load_full_registry(self):
        """Import every models/ and eval_settings/ module so their
        module-level registration side effects populate this registry."""
        root = Path(__file__).parent
        model_excludes = {'model_base'}
        setting_excludes = {'eval_setting_base', 'eval_setting_subsample', 'image_utils'}
        for entry in root.glob('models/*.py'):
            stem = str(entry.stem)
            if ('__' not in stem) and (stem not in model_excludes):
                import_module(f'models.{stem}')
        for entry in root.glob('eval_settings/*.py'):
            stem = str(entry.stem)
            if ('__' not in stem) and (stem not in setting_excludes):
                import_module(f'eval_settings.{stem}')

    def model_names(self):
        """All registered model names (dict key view)."""
        return self.models.keys()

    def eval_setting_names(self):
        """All registered eval setting names (dict key view)."""
        return self.eval_settings.keys()

    def contains_model(self, model_name):
        return model_name in self.models

    def contains_eval_setting(self, eval_setting_name):
        return eval_setting_name in self.eval_settings

    def get_model(self, model_name):
        return self.models[model_name]

    def get_eval_setting(self, eval_setting_name):
        return self.eval_settings[eval_setting_name]
|
def build_clip_imagenet_model(ckpt_path):
    """Rebuild a CLIP-based ImageNet classifier from a training checkpoint.

    :param ckpt_path: path to a torch checkpoint containing 'args',
        'model_hparams', 'model_dict' and model shape metadata.
    :return: (model in eval mode, CLIP preprocessing transform,
        checkpoint dict with 'model_dict' removed).
    """
    checkpoint = torch.load(ckpt_path)
    args = checkpoint['args']
    hparams = checkpoint['model_hparams']
    model_class = algorithms.get_algorithm_class(args['algorithm'])
    feature_dim = checkpoint['model_feature_dim']
    # NOTE(review): orig_num_classes is read but never used below -- confirm.
    orig_num_classes = checkpoint['model_num_classes']
    orig_num_domains = checkpoint['model_num_domains']
    # The rebuilt head always targets the full ImageNet class list, even if
    # the checkpoint was trained with a different number of classes.
    num_classes = len(imagenet_classnames)
    idx2class = {i: k for (i, k) in enumerate(imagenet_classnames)}
    (pretrained, preprocess) = clip.load(hparams['clip_model'], jit=False)
    pretrained.float()
    model = model_class(feature_dim, num_classes, orig_num_domains, hparams, pretrained, idx2class)
    state_dict = checkpoint['model_dict']
    # Drop the old classifier head: its shape may not match num_classes.
    if ('classifier_head.weight' in state_dict):
        del state_dict['classifier_head.weight']
        del state_dict['classifier_head.bias']
    if ('classifier.linear.weight' in state_dict):
        # Checkpoint used a logistic-regression head; recreate it at the new size.
        model.classifier = PLLogisticRegression(input_dim=feature_dim, num_classes=num_classes)
    # strict=False so the deleted/resized head keys don't abort loading.
    (missing_keys, unexpected_keys) = model.load_state_dict(state_dict, strict=False)
    print('Missing: {}. Unexpected: {}'.format(missing_keys, unexpected_keys))
    if isinstance(model, algorithms.CLIPPretrained):
        # For pure pretrained CLIP, route inputs through the visual tower directly.
        model.transform = torch.nn.Identity()
        model.transform = torch.nn.Sequential(model.clip_model.visual, model.transform)
    model.eval()
    # Free the (large) weights blob before returning the metadata dict.
    del checkpoint['model_dict']
    return (model, preprocess, checkpoint)
|
def to_rgb(image):
    """Convert `image` to RGB mode via its ``convert`` method."""
    rgb_image = image.convert('RGB')
    return rgb_image
|
def clip_transform(n_px):
    """Build the standard CLIP preprocessing pipeline for images of size n_px."""
    steps = [
        Resize(n_px, interpolation=Image.BICUBIC),
        CenterCrop(n_px),
        to_rgb,
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073),
                  (0.26862954, 0.26130258, 0.27577711)),
    ]
    return Compose(steps)
|
def load_processed_dataset(path):
    """Load a pre-processed dataset that was saved with numpy (.npy/.npz)."""
    return np.load(path)
|
class L3Attack(torch.autograd.Function):
    """Straight-through autograd wrapper for the targeted L3 attack.

    ``forward`` runs the attack (L3_function); ``backward`` forwards the
    incoming gradient unchanged to the image argument and None to all of
    the non-differentiable inputs.
    """

    @staticmethod
    def forward(ctx, model, img, target_lable, dataset, allstep, sink_lr, s_radius):
        """Run the attack; the output is treated as a function of `img` only."""
        return L3_function(model, img, target_lable, dataset=dataset,
                           allstep=allstep, lr=sink_lr, s_radius=s_radius)

    @staticmethod
    def backward(ctx, grad_output):
        # One entry per forward() input: gradient flows only to img (slot 2).
        return (None, grad_output, None, None, None, None, None)
|
class L4Attack(torch.autograd.Function):
    """Straight-through autograd wrapper for the untargeted L4 attack.

    ``forward`` runs the attack (L4_function); ``backward`` forwards the
    incoming gradient unchanged to the image argument and None to all of
    the non-differentiable inputs.
    """

    @staticmethod
    def forward(ctx, model, img, dataset, allstep, sink_lr, u_radius):
        """Run the attack; the output is treated as a function of `img` only."""
        return L4_function(model, img, dataset=dataset,
                           allstep=allstep, lr=sink_lr, u_radius=u_radius)

    @staticmethod
    def backward(ctx, grad_output):
        # One entry per forward() input: gradient flows only to img (slot 2).
        return (None, grad_output, None, None, None, None)
|
def L3_function(model, img, target_lable, dataset, allstep, lr, s_radius, margin=20, use_margin=False):
x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
optimizer_s = optim.SGD([x_var], lr=lr)
with torch.enable_grad():
for step in range(allstep):
optimizer_s.zero_grad()
output = model(transform(x_var, dataset=dataset))
if use_margin:
target_lable = target_lable[0].item()
(_, top2_1) = output.data.cpu().topk(2)
argmax11 = top2_1[0][0]
if (argmax11 == target_l):
argmax11 = top2_1[0][1]
loss = ((output[0][argmax11] - output[0][target_l]) + margin).clamp(min=0)
else:
loss = F.cross_entropy(output, target_lable)
loss.backward()
x_var.data = torch.clamp((x_var - (lr * x_var.grad.data)), min=0, max=1)
x_var.data = (torch.clamp((x_var - img), min=(- s_radius), max=s_radius) + img)
return x_var
|
def L4_function(model, img, dataset, allstep, lr, u_radius, margin=20, use_margin=False):
    """Untargeted attack: push `img` away from the model's own prediction
    with projected gradient steps inside an L-inf ball of radius `u_radius`.

    :param model: classifier; called on transform(x, dataset=...)
    :param img: input image tensor (moved to CUDA; assumes a GPU)
    :param allstep: number of gradient steps
    :param lr: step size
    :param u_radius: L-inf projection radius around `img`
    :param margin: margin for the CW-style loss when use_margin=True
    :return: the adversarial image as a Variable (values in [0, 1])
    """
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    # The label being attacked is the model's prediction on the clean input.
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item()
    optimizer_s = optim.SGD([x_var], lr=lr)
    with torch.enable_grad():
        for step in range(allstep):
            optimizer_s.zero_grad()
            output = model(transform(x_var, dataset=dataset))
            if use_margin:
                # CW-style margin loss against the strongest competing class.
                (_, top2_1) = output.data.cpu().topk(2)
                argmax11 = top2_1[0][0]
                if (argmax11 == true_label):
                    argmax11 = top2_1[0][1]
                loss = ((output[0][true_label] - output[0][argmax11]) + margin).clamp(min=0)
            else:
                # Negated cross-entropy: maximize loss on the true label.
                loss = (- F.cross_entropy(output, torch.LongTensor([true_label]).cuda()))
            loss.backward()
            # Manual gradient step (optimizer_s.step() is never called), then
            # projection onto [0,1] and the u_radius ball around img.
            x_var.data = torch.clamp((x_var - (lr * x_var.grad.data)), min=0, max=1)
            x_var.data = (torch.clamp((x_var - img), min=(- u_radius), max=u_radius) + img)
    return x_var
|
def noisy_img(img, n_radius):
    """Return `img` perturbed by isotropic Gaussian noise of scale `n_radius`."""
    perturbation = torch.randn_like(img) * n_radius
    return img + perturbation
|
def cross_entropy(pred, target):
    """Soft-label cross entropy: mean over the batch of
    -sum(target * log_softmax(pred)).

    :param pred: raw logits, shape (batch, num_classes)
    :param target: target probability distribution, same shape as `pred`
    :return: scalar tensor
    """
    # Use the functional form with an explicit dim: the original instantiated
    # nn.LogSoftmax() without `dim`, which is deprecated and relied on the
    # legacy implicit-dim behavior (dim=1 for 2-D input).
    return torch.mean(torch.sum(((- target) * F.log_softmax(pred, dim=1)), dim=1))
|
def target_distribution(original_softmax, target_label):
    """Return a copy of `original_softmax` (shape (1, C)) in which the
    probability of the predicted (argmax) class and of `target_label`
    have been swapped. The input tensor is not modified."""
    predicted = original_softmax.max(1, keepdim=True)[1][0].item()
    swapped = original_softmax.clone()
    p, t = int(predicted), int(target_label)
    # Read both values from the untouched input, write them crosswise.
    swapped[0, p] = original_softmax[0, t]
    swapped[0, t] = original_softmax[0, p]
    return swapped
|
def PGD(model, img, dataset='imagenet', allstep=30, lr=0.03, radius=0.1, lbd=2, setting='white', noise_radius=0.1, targeted_lr=0.005, targeted_radius=0.03, untargeted_lr=0.1, untargeted_radius=0.03):
    """PGD-style distribution-matching attack on a (possibly smoothed) classifier.

    Optimizes `img` with Adam so the model's softmax matches a target
    distribution (probabilities of the true and a random target label
    swapped). In the 'white' setting, extra losses keep the attack effective
    under random noise and under nested targeted/untargeted attacks. Every
    step the result is projected into [0,1] and an L-inf ball of `radius`
    around the clean image.

    NOTE(review): `transform`, `random_label`, `noisy`, `t_attack` and
    `u_attack` are defined elsewhere in the project; `noisy` may be the
    `noisy_img` helper above -- confirm.

    :param setting: 'white' (full loss) or 'gray' (distribution loss only)
    :raises ValueError: for any other `setting`
    :return: the adversarial image as a Variable (requires a GPU)
    """
    model.eval()
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item()
    # Clean-input softmax used to build the swapped target distribution.
    # (F.softmax without dim= is deprecated; kept for parity with callers.)
    original_softmax = F.softmax(model(transform(x_var.clone(), dataset=dataset))).data
    optimizer = optim.Adam([x_var], lr=lr)
    target_label = random_label(true_label, dataset=dataset)
    target_l = torch.LongTensor([target_label]).cuda()
    target_dist = target_distribution(original_softmax, target_label)
    for i in range(allstep):
        optimizer.zero_grad()
        total_loss = 0
        output_ori = model(transform(x_var, dataset=dataset))
        # Loss 1: match the swapped-label target distribution.
        loss1 = cross_entropy(output_ori, target_dist)
        if (setting == 'white'):
            total_loss += (lbd * loss1)
            # Loss 2: the softmax should be stable under random input noise.
            noise_var = noisy(x_var, noise_radius)
            output_noise = model(transform(noise_var, dataset=dataset))
            loss2 = torch.norm((F.softmax(output_noise) - F.softmax(output_ori)), 1)
            total_loss += loss2
            # Loss 3: a one-step nested targeted attack (toward a fresh
            # random label) should still succeed from x_var.
            new_target = torch.LongTensor([random_label(target_label, dataset=dataset)]).cuda()
            t_attack_var = t_attack(model, x_var, new_target, dataset, 1, targeted_lr, targeted_radius)
            output_t_attack = model(transform(t_attack_var, dataset=dataset))
            loss3 = F.cross_entropy(output_t_attack, new_target)
            total_loss += loss3
            # Loss 4: a one-step nested untargeted attack should move the
            # prediction away from the target label (hence the minus sign).
            u_attack_var = u_attack(model, x_var, dataset, 1, untargeted_lr, untargeted_radius)
            output_u_attack = model(transform(u_attack_var, dataset=dataset))
            loss4 = F.cross_entropy(output_u_attack, target_l)
            total_loss -= loss4
        elif (setting == 'gray'):
            total_loss += loss1
        else:
            # Bug fix: raising a plain string is a TypeError in Python 3;
            # raise a proper exception instead.
            raise ValueError('attack setting is not supported')
        total_loss.backward()
        optimizer.step()
        # Project back: clamp to [0,1], then into the L-inf ball around img.
        x_var.data = (torch.clamp((torch.clamp(x_var, min=0, max=1) - img), min=(- radius), max=radius) + img)
    return x_var
|
def CW(model, img, dataset='imagenet', allstep=30, lr=0.03, radius=0.1, margin=20.0, lbd=2, setting='white', noise_radius=0.1, targeted_lr=0.005, targeted_radius=0.03, untargeted_lr=0.1, untargeted_radius=0.03):
    """Carlini-Wagner-style margin attack toward a random target label.

    Mirrors `PGD` but uses hinge (margin) losses on logits instead of
    cross-entropy. In the 'white' setting, additional terms are added to evade
    noise / targeted / untargeted detection; 'gray' uses only the margin loss.
    Raises ValueError for any other `setting`.
    """
    model.eval()
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item()
    optimizer = optim.Adam([x_var], lr=lr)
    target_label = random_label(true_label, dataset=dataset)
    for step in range(allstep):
        optimizer.zero_grad()
        total_loss = 0
        output_ori = model(transform(x_var, dataset=dataset))
        (_, top2_1) = output_ori.data.cpu().topk(2)
        # Strongest competing class, excluding the target itself.
        argmax11 = top2_1[0][0]
        if (argmax11 == target_label):
            argmax11 = top2_1[0][1]
        loss1 = ((output_ori[0][argmax11] - output_ori[0][target_label]) + margin).clamp(min=0)
        if (setting == 'white'):
            total_loss += (lbd * loss1)
            # Term 2: softmax stability under noise (evade l1 detection).
            noise_var = noisy(x_var, noise_radius)
            output_noise = model(transform(noise_var, dataset=dataset))
            loss2 = torch.norm((F.softmax(output_noise) - F.softmax(output_ori)), 1)
            total_loss += loss2
            # Term 3: a one-step targeted attack should succeed quickly.
            new_tl = random_label(target_label, dataset=dataset)
            new_target = torch.LongTensor([new_tl]).cuda()
            t_attack_var = t_attack(model, x_var, new_target, dataset, 1, targeted_lr, targeted_radius)
            output_t_attack = model(transform(t_attack_var, dataset=dataset))
            (_, top2_3) = output_t_attack.data.cpu().topk(2)
            argmax13 = top2_3[0][0]
            if (argmax13 == new_tl):
                argmax13 = top2_3[0][1]
            loss3 = ((output_t_attack[0][argmax13] - output_t_attack[0][new_tl]) + margin).clamp(min=0)
            total_loss += loss3
            # Term 4: a one-step untargeted attack should keep the target label.
            u_attack_var = u_attack(model, x_var, dataset, 1, untargeted_lr, untargeted_radius)
            output_u_attack = model(transform(u_attack_var, dataset=dataset))
            (_, top2_4) = output_u_attack.data.cpu().topk(2)
            argmax14 = top2_4[0][1]
            if (argmax14 == target_label):
                argmax14 = top2_4[0][0]
            loss4 = ((output_u_attack[0][argmax14] - output_u_attack[0][target_label]) + margin).clamp(min=0)
            total_loss -= loss4
        elif (setting == 'gray'):
            total_loss += loss1
        else:
            # BUG FIX: raising a plain string is a TypeError in Python 3.
            raise ValueError('attack setting is not supported')
        total_loss.backward()
        optimizer.step()
        # Project back into [0,1] and the L-inf ball around the clean image.
        x_var.data = (torch.clamp((torch.clamp(x_var, min=0, max=1) - img), min=(- radius), max=radius) + img)
    return x_var
|
def l1_detection(model, img, dataset, n_radius):
    """Return the L1 distance between softmax outputs on `img` and a noisy copy."""
    clean_probs = F.softmax(model(transform(img, dataset=dataset)))
    noisy_probs = F.softmax(model(transform(noisy(img, n_radius), dataset=dataset)))
    return torch.norm((clean_probs - noisy_probs), 1).item()
|
def targeted_detection(model, img, dataset, lr, t_radius, cap=200, margin=20, use_margin=False):
    """Count gradient steps needed to push `img` to a random target label.

    Runs (capped) targeted gradient descent and returns the number of steps
    until the model's prediction changes; the count serves as a detection
    statistic (adversarial inputs typically flip in fewer steps).
    """
    model.eval()
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item()
    # NOTE(review): optimizer_s is created but never stepped; updates below
    # are manual gradient steps -- confirm this is intentional.
    optimizer_s = optim.SGD([x_var], lr=lr)
    target_l = torch.LongTensor([random_label(true_label, dataset=dataset)]).cuda()
    # BUG FIX: the original rebound `target_l` to a python int inside the loop
    # (target_l = target_l[0].item()), which crashed with a TypeError on the
    # second iteration when use_margin=True. Keep the tensor intact and use a
    # separate int copy for indexing instead.
    target_idx = target_l[0].item()
    counter = 0
    while (model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item() == true_label):
        optimizer_s.zero_grad()
        output = model(transform(x_var, dataset=dataset))
        if use_margin:
            (_, top2_1) = output.data.cpu().topk(2)
            argmax11 = top2_1[0][0]
            if (argmax11 == target_idx):
                argmax11 = top2_1[0][1]
            # Hinge loss pulling the target logit above the strongest rival.
            loss = ((output[0][argmax11] - output[0][target_idx]) + margin).clamp(min=0)
        else:
            loss = F.cross_entropy(output, target_l)
        loss.backward()
        # Manual gradient step, then projection into [0,1] and the L-inf ball.
        x_var.data = torch.clamp((x_var - (lr * x_var.grad.data)), min=0, max=1)
        x_var.data = (torch.clamp((x_var - img), min=(- t_radius), max=t_radius) + img)
        counter += 1
        if (counter >= cap):
            break
    return counter
|
def untargeted_detection(model, img, dataset, lr, u_radius, cap=1000, margin=20, use_margin=False):
    """Count gradient steps needed to move `img` off its predicted label.

    Runs (capped) gradient ascent away from the current prediction and
    returns the step count, used as a detection statistic.
    """
    model.eval()
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item()
    # NOTE(review): optimizer_s is never stepped; updates below are manual
    # gradient steps -- confirm intentional.
    optimizer_s = optim.SGD([x_var], lr=lr)
    counter = 0
    # Iterate until the prediction flips or the step cap is reached.
    while (model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item() == true_label):
        optimizer_s.zero_grad()
        output = model(transform(x_var, dataset=dataset))
        if use_margin:
            (_, top2_1) = output.data.cpu().topk(2)
            argmax11 = top2_1[0][0]
            if (argmax11 == true_label):
                argmax11 = top2_1[0][1]
            # Hinge loss pushing the runner-up class above the current label.
            loss = ((output[0][true_label] - output[0][argmax11]) + margin).clamp(min=0)
        else:
            loss = (- F.cross_entropy(output, torch.LongTensor([true_label]).cuda()))
        loss.backward()
        # Manual gradient step, then projection into [0,1] and the L-inf ball.
        x_var.data = torch.clamp((x_var - (lr * x_var.grad.data)), min=0, max=1)
        x_var.data = (torch.clamp((x_var - img), min=(- u_radius), max=u_radius) + img)
        counter += 1
        if (counter >= cap):
            break
    return counter
|
def l1_vals(model, dataset, title, attack, lowind, upind, real_dir, adv_dir, n_radius):
    """Gather the l1-detection statistic over a saved image index range.

    For attack == 'real' every image is scored; otherwise only images the
    model actually misclassifies (i.e. successful attacks) are scored.
    """
    vals = np.zeros(0)
    if (attack == 'real'):
        for idx in range(lowind, upind):
            image_dir = os.path.join(real_dir, (str(idx) + '_img.pt'))
            assert os.path.exists(image_dir)
            view_data = torch.load(image_dir)
            model.eval()
            vals = np.concatenate((vals, [l1_detection(model, view_data, dataset, n_radius)]))
    else:
        n_success = (upind - lowind)
        for idx in range(lowind, upind):
            image_dir = os.path.join(os.path.join(adv_dir, attack), ((str(idx) + title) + '.pt'))
            assert os.path.exists(image_dir)
            adv = torch.load(image_dir)
            real_label = torch.load(os.path.join(real_dir, (str(idx) + '_label.pt')))
            model.eval()
            predicted_label = model(transform(adv.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0]
            if (real_label == predicted_label):
                # The attack failed on this image; skip it.
                n_success -= 1
            else:
                vals = np.concatenate((vals, [l1_detection(model, adv, dataset, n_radius)]))
        print('this is number of success in l1 detection', n_success)
    return vals
|
def targeted_vals(model, dataset, title, attack, lowind, upind, real_dir, adv_dir, targeted_lr, t_radius):
    """Gather the targeted-attack step-count statistic over a saved image range.

    For attack == 'real' every image is scored; otherwise only images the
    model actually misclassifies (i.e. successful attacks) are scored.
    """
    vals = np.zeros(0)
    if (attack == 'real'):
        for idx in range(lowind, upind):
            image_dir = os.path.join(real_dir, (str(idx) + '_img.pt'))
            assert os.path.exists(image_dir)
            view_data = torch.load(image_dir)
            model.eval()
            vals = np.concatenate((vals, [targeted_detection(model, view_data, dataset, targeted_lr, t_radius)]))
    else:
        n_success = (upind - lowind)
        for idx in range(lowind, upind):
            image_dir = os.path.join(os.path.join(adv_dir, attack), ((str(idx) + title) + '.pt'))
            assert os.path.exists(image_dir)
            adv = torch.load(image_dir)
            real_label = torch.load(os.path.join(real_dir, (str(idx) + '_label.pt')))
            model.eval()
            predicted_label = model(transform(adv.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0]
            if (real_label == predicted_label):
                # The attack failed on this image; skip it.
                n_success -= 1
            else:
                vals = np.concatenate((vals, [targeted_detection(model, adv, dataset, targeted_lr, t_radius)]))
        print('this is number of success in targeted detection', n_success)
    return vals
|
def untargeted_vals(model, dataset, title, attack, lowind, upind, real_dir, adv_dir, untargeted_lr, u_radius):
    """Gather the untargeted-attack step-count statistic over a saved image range.

    For attack == 'real' every image is scored; otherwise only images the
    model actually misclassifies (i.e. successful attacks) are scored.
    """
    vals = np.zeros(0)
    if (attack == 'real'):
        for idx in range(lowind, upind):
            image_dir = os.path.join(real_dir, (str(idx) + '_img.pt'))
            assert os.path.exists(image_dir)
            view_data = torch.load(image_dir)
            model.eval()
            vals = np.concatenate((vals, [untargeted_detection(model, view_data, dataset, untargeted_lr, u_radius)]))
    else:
        n_success = (upind - lowind)
        for idx in range(lowind, upind):
            image_dir = os.path.join(os.path.join(adv_dir, attack), ((str(idx) + title) + '.pt'))
            assert os.path.exists(image_dir)
            adv = torch.load(image_dir)
            real_label = torch.load(os.path.join(real_dir, (str(idx) + '_label.pt')))
            model.eval()
            predicted_label = model(transform(adv.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0]
            if (real_label == predicted_label):
                # The attack failed on this image; skip it.
                n_success -= 1
            else:
                vals = np.concatenate((vals, [untargeted_detection(model, adv, dataset, untargeted_lr, u_radius)]))
        print('this is number of success in untargeted detection', n_success)
    return vals
|
def single_metric_fpr_tpr(fpr, criterions, model, dataset, title, attacks, lowind, upind, real_dir, adv_dir, n_radius, targeted_lr, t_radius, untargeted_lr, u_radius, opt='l1'):
    """Report the real-image FPR and per-attack TPR for a single detector.

    `opt` selects the statistic and threshold column: 'l1' (noise softmax
    distance), 'targeted' (targeted-attack step count) or 'untargeted'
    (untargeted-attack step count). Any other value raises
    NotImplementedError.
    """
    if (opt == 'l1'):
        target = l1_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, n_radius)
        threshold = criterions[fpr][0]
        print('this is l1 norm for real images', target)
    elif (opt == 'targeted'):
        target = targeted_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, targeted_lr, t_radius)
        threshold = criterions[fpr][1]
        print('this is step of targetd attack for real images', target)
    elif (opt == 'untargeted'):
        target = untargeted_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, untargeted_lr, u_radius)
        threshold = criterions[fpr][2]
        print('this is step of untargetd attack for real images', target)
    else:
        # BUG FIX: raising a plain string is a TypeError in Python 3.
        raise NotImplementedError('Not implemented')
    fpr_accurate = ((len(target[(target > threshold)]) * 1.0) / len(target))
    print('corresponding accurate fpr of this threshold is', fpr_accurate)
    for i in range(len(attacks)):
        if (opt == 'l1'):
            a_target = l1_vals(model, dataset, title, attacks[i], lowind, upind, real_dir, adv_dir, n_radius)
            print('this is l1 norm for ', attacks[i], a_target)
        elif (opt == 'targeted'):
            a_target = targeted_vals(model, dataset, title, attacks[i], lowind, upind, real_dir, adv_dir, targeted_lr, t_radius)
            print('this is step of targetd attack for ', attacks[i], a_target)
        elif (opt == 'untargeted'):
            a_target = untargeted_vals(model, dataset, title, attacks[i], lowind, upind, real_dir, adv_dir, untargeted_lr, u_radius)
            print('this is step of untargetd attack for ', attacks[i], a_target)
        else:
            # BUG FIX: same string-raise defect as above.
            raise NotImplementedError('Not implemented')
        tpr = ((len(a_target[(a_target > threshold)]) * 1.0) / len(a_target))
        print((('corresponding tpr for ' + attacks[i]) + ' of this threshold is'), tpr)
|
def combined_metric_fpr_tpr(fpr, criterions, model, dataset, title, attacks, lowind, upind, real_dir, adv_dir, n_radius, targeted_lr, t_radius, untargeted_lr, u_radius):
    """Report the real-image FPR and per-attack TPR of the combined detector.

    An image is flagged when any of the three statistics (l1 norm, targeted
    step count, untargeted step count) exceeds its threshold from
    `criterions[fpr]`.
    """
    th_l1 = criterions[fpr][0]
    th_t = criterions[fpr][1]
    th_u = criterions[fpr][2]

    def _flag_rate(v1, v2, v3):
        # Fraction of samples flagged by at least one of the three detectors.
        flagged = np.logical_or(np.logical_or((v1 > th_l1), (v2 > th_t)), (v3 > th_u))
        return ((np.count_nonzero(flagged) * 1.0) / len(v1))

    real_1 = l1_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, n_radius)
    real_2 = targeted_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, targeted_lr, t_radius)
    real_3 = untargeted_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, untargeted_lr, u_radius)
    print('corresponding accurate fpr of this threshold is ', _flag_rate(real_1, real_2, real_3))
    for attack_name in attacks:
        adv_1 = l1_vals(model, dataset, title, attack_name, lowind, upind, real_dir, adv_dir, n_radius)
        adv_2 = targeted_vals(model, dataset, title, attack_name, lowind, upind, real_dir, adv_dir, targeted_lr, t_radius)
        adv_3 = untargeted_vals(model, dataset, title, attack_name, lowind, upind, real_dir, adv_dir, untargeted_lr, u_radius)
        print(('corresponding tpr for ' + attack_name + ' of this threshold is'), _flag_rate(adv_1, adv_2, adv_3))
|
def tune_criterion_thresholds(model, dataset, title, attacks, lowind, upind, real_dir, adv_dir, n_radius, targeted_lr, t_radius, untargeted_lr, u_radius, target_fpr):
    """Grid-search threshold triples whose combined FPR is in (target_fpr-0.01, target_fpr].

    Returns (suitable_pairs, tprs): (l1, targeted, untargeted) threshold
    triples and the matching true-positive rates.
    NOTE(review): both return values are re-initialized per attack, so only
    the results for the LAST entry of `attacks` are returned -- confirm.
    """
    target_1 = l1_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, n_radius)
    target_2 = targeted_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, targeted_lr, t_radius)
    target_3 = untargeted_vals(model, dataset, title, 'real', lowind, upind, real_dir, adv_dir, untargeted_lr, u_radius)
    # Candidate thresholds are the sorted real-image statistics themselves.
    p_1 = target_1.copy()
    p_2 = target_2.copy()
    p_3 = target_3.copy()
    p_1.sort()
    p_2.sort()
    p_3.sort()
    n_1, n_2, n_3 = len(p_1), len(p_2), len(p_3)
    fpr = np.zeros(((n_1 * n_2 * n_3) + 1))
    for ix_1 in range(0, n_1):
        for ix_2 in range(0, n_2):
            for ix_3 in range(0, n_3):
                # Row-major flat index over the 3-D threshold grid.
                flat = (((n_2 * n_3) * ix_1) + (n_3 * ix_2) + ix_3)
                fpr[flat] = ((len(target_1[np.logical_or(np.logical_or((target_1 > p_1[ix_1]), (target_2 > p_2[ix_2])), (target_3 > p_3[ix_3]))]) * 1.0) / len(target_1))
    fpr[(- 1)] = ((len(target_1[np.logical_or(np.logical_or((target_1 >= p_1[(- 1)]), (target_2 >= p_2[(- 1)])), (target_3 >= p_3[(- 1)]))]) * 1.0) / len(target_1))
    # NOTE(review): the figure is created but nothing is drawn on it here.
    plt.figure(figsize=(8, 8))
    for i in range(len(attacks)):
        tprs = []
        suitable_pairs = []
        a_target_1 = l1_vals(model, dataset, title, attacks[i], lowind, upind, real_dir, adv_dir, n_radius)[::(- 1)]
        a_target_2 = targeted_vals(model, dataset, title, attacks[i], lowind, upind, real_dir, adv_dir, targeted_lr, t_radius)[::(- 1)]
        a_target_3 = untargeted_vals(model, dataset, title, attacks[i], lowind, upind, real_dir, adv_dir, untargeted_lr, u_radius)[::(- 1)]
        # BUG FIX: this buffer was sized len(p_1)*len(p_2)+1 but is indexed
        # over all three dimensions; the undersizing caused an IndexError.
        tpr = np.zeros(((n_1 * n_2 * n_3) + 1))
        for ix_1 in range(0, n_1):
            for ix_2 in range(0, n_2):
                for ix_3 in range(0, n_3):
                    flat = (((n_2 * n_3) * ix_1) + (n_3 * ix_2) + ix_3)
                    tpr[flat] = ((len(a_target_1[np.logical_or(np.logical_or((a_target_1 > p_1[ix_1]), (a_target_2 > p_2[ix_2])), (a_target_3 > p_3[ix_3]))]) * 1.0) / len(a_target_1))
                    if ((fpr[flat] <= target_fpr) and (fpr[flat] > (target_fpr - 0.01))):
                        suitable_pairs.append((p_1[ix_1], p_2[ix_2], p_3[ix_3]))
                        tprs.append(tpr[flat])
        tpr[(- 1)] = ((len(a_target_1[np.logical_or(np.logical_or((a_target_1 >= p_1[(- 1)]), (a_target_2 >= p_2[(- 1)])), (a_target_3 >= p_3[(- 1)]))]) * 1.0) / len(a_target_1))
    return (suitable_pairs, tprs)
|
class VGG(nn.Module):
    """VGG model for CIFAR-10: a conv feature extractor plus a 512-512-10 MLP head."""

    def __init__(self, features):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Linear(512, 10),
        )
        # He-style (fan-out) normal initialization for every conv layer.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = ((module.kernel_size[0] * module.kernel_size[1]) * module.out_channels)
                module.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
                module.bias.data.zero_()

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), (- 1))
        return self.classifier(flat)
|
def make_layers(cfg, batch_norm=False):
    """Build a VGG feature stack from a config list ('M' = max-pool, int = conv width)."""
    layers = []
    in_channels = 3
    for spec in cfg:
        if (spec == 'M'):
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv = nn.Conv2d(in_channels, spec, kernel_size=3, padding=1)
        if batch_norm:
            layers.extend([conv, nn.BatchNorm2d(spec), nn.ReLU(inplace=True)])
        else:
            layers.extend([conv, nn.ReLU(inplace=True)])
        in_channels = spec
    return nn.Sequential(*layers)
|
def vgg19():
    """VGG 19-layer model (configuration "E")."""
    cfg_e = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
    return VGG(make_layers(cfg_e))
|
def main():
    """Train (or evaluate) VGG-19 on CIFAR-10, checkpointing the best model."""
    global args, best_prec1
    args = parser.parse_args()
    if (not os.path.exists(args.save_dir)):
        os.makedirs(args.save_dir)
    if (not os.path.exists(args.real_dir)):
        os.makedirs(args.real_dir)
    model = vgg19()
    # Data-parallel only over the conv features, as in the original pytorch example.
    model.features = torch.nn.DataParallel(model.features)
    if args.cpu:
        model.cpu()
    else:
        model.cuda()
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            # BUG FIX: the success message printed args.evaluate instead of
            # the checkpoint path that was actually loaded.
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(root=args.real_dir, train=True, transform=transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, 4), transforms.ToTensor(), normalize]), download=True), batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(root=args.real_dir, train=False, transform=transforms.Compose([transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    criterion = nn.CrossEntropyLoss()
    if args.cpu:
        criterion = criterion.cpu()
    else:
        criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_loader, model, criterion, optimizer, epoch)
        prec1 = validate(val_loader, model, criterion)
        is_best = (prec1 > best_prec1)
        best_prec1 = max(prec1, best_prec1)
        # Best epochs overwrite model_best; others get per-epoch files.
        if is_best:
            save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'best_prec1': best_prec1}, is_best, filename=os.path.join(args.save_dir, 'model_best.pth.tar'))
        else:
            save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'best_prec1': best_prec1}, is_best, filename=os.path.join(args.save_dir, 'checkpoint_{}.tar'.format(epoch)))
|
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch, logging loss/top-1 every `args.print_freq` batches."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    model.train()
    end = time.time()
    for (i, (input, target)) in enumerate(train_loader):
        data_time.update((time.time() - end))
        if (args.cpu == False):
            input = input.cuda()
            target = target.cuda()
        output = model(input)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Cast to float32 for metric bookkeeping -- presumably relevant for
        # reduced-precision runs; confirm.
        output = output.float()
        loss = loss.float()
        # Track loss / top-1 accuracy, weighted by batch size.
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            print('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))
|
def validate(val_loader, model, criterion):
    """Run evaluation over `val_loader`; returns the average top-1 precision."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    model.eval()
    end = time.time()
    for (i, (input, target)) in enumerate(val_loader):
        if (args.cpu == False):
            input = input.cuda()
            target = target.cuda()
        # No gradients needed during evaluation.
        with torch.no_grad():
            output = model(input)
            loss = criterion(output, target)
        output = output.float()
        loss = loss.float()
        # Track loss / top-1 accuracy, weighted by batch size.
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg
|
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize the training state dict to `filename`.

    NOTE(review): `is_best` is accepted but unused here; the caller encodes
    best/non-best in the filename instead -- confirm this is intentional.
    """
    torch.save(state, filename)
|
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count, and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += (val * n)
        self.count += n
        self.avg = (self.sum / self.count)
|
def adjust_learning_rate(optimizer, epoch):
    """Step decay: halve the initial learning rate every 30 epochs."""
    decayed = (args.lr * (0.5 ** (epoch // 30)))
    for group in optimizer.param_groups:
        group['lr'] = decayed
|
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of scalar tensors, each the top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    (_, pred) = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: use reshape instead of view -- slicing the transposed
        # prediction tensor can yield a non-contiguous tensor, and .view()
        # raises on non-contiguous tensors in recent PyTorch versions.
        correct_k = correct[:k].reshape((- 1)).float().sum(0)
        res.append(correct_k.mul_((100.0 / batch_size)))
    return res
|
def run_tasks(config_path, cuda_devices):
    """Launch the multi-GPU task runner subprocess with the given hydra config.

    Raises RuntimeError on a non-zero shell exit status.
    NOTE(review): the command is built by string interpolation and run through
    the shell (os.system); assumes config_path/cuda_devices come from trusted
    configuration -- confirm.
    """
    command = f'HYDRA_CONFIG_PATH={config_path} python run_tasks_on_multiple_gpus.py cuda_devices={cuda_devices}'
    log.info(f'Command: {command}')
    ret = os.system(command)
    if (ret != 0):
        raise RuntimeError(ret)
    return ret
|
def average_results(config, work_dir):
    """Launch evaluation runs for every (model directory, seed) pair.

    Builds one task per pair, writes them all into a hydra-style YAML config
    under `work_dir`, and hands that config to `run_tasks`.
    """
    tasks = []
    for model_dir_name in os.listdir(config.model_dir):
        model_path = (Path(config.model_dir) / model_dir_name)
        base_args = f'{config.args} model.model_name_or_path={model_path}'
        for seed in config.seeds.split(','):
            output_dir = str(((Path(work_dir) / 'results') / model_dir_name) / seed)
            # NOTE: the double space before `do_train` reproduces the original
            # argument string exactly.
            args_str = (f'{base_args} seed={seed} hydra.run.dir={output_dir} '
                        f'output_dir={output_dir}  do_train=False do_eval=True ')
            tasks.append({
                'config_path': config.config_path,
                'environ': '',
                'command': 'run_glue.py',
                'name': f'model_{model_dir_name}_{seed}',
                'args': args_str,
            })
    config_path = (Path(work_dir) / 'config.yaml')
    config_structure = {
        'cuda_devices': '',
        'tasks': tasks,
        'hydra': {'run': {'dir': work_dir}},
    }
    with open(config_path, 'w') as f:
        yaml.dump(config_structure, f)
    run_tasks(config_path, config.cuda_devices)
|
@hydra.main(config_path=os.environ['HYDRA_CONFIG_PATH'])
def main(config):
    """Hydra entry point: run `average_results` from the original working directory."""
    # Hydra creates a fresh working directory per run; remember it, then
    # switch back so relative paths in the config resolve as written.
    auto_generated_dir = os.getcwd()
    log.info(f'Work dir: {auto_generated_dir}')
    os.chdir(hydra.utils.get_original_cwd())
    average_results(config, auto_generated_dir)
|
def convert_dropouts(model, ue_args):
    """Replace the model's dropout layers per the uncertainty-estimation config.

    Supports dropout types 'MC' and 'DPP', substituted either into the last
    dropout only ('last') or into all encoder dropouts ('all'). Raises
    ValueError on an unknown type or substitution mode.
    """
    if (ue_args.dropout_type == 'MC'):
        # NOTE(review): this lambda ignores its p/activate arguments and always
        # builds DropoutMC(p=ue_args.inference_prob, activate=False) -- confirm
        # that is intentional.
        dropout_ctor = (lambda p, activate: DropoutMC(p=ue_args.inference_prob, activate=False))
    elif (ue_args.dropout_type == 'DPP'):
        def dropout_ctor(p, activate):
            return DropoutDPP(p=p, activate=activate, max_n=ue_args.dropout.max_n, max_frac=ue_args.dropout.max_frac, mask_name=ue_args.dropout.mask_name)
    else:
        raise ValueError(f'Wrong dropout type: {ue_args.dropout_type}')
    if (ue_args.dropout_subs == 'last'):
        set_last_dropout(model, dropout_ctor(p=ue_args.inference_prob, activate=False))
    elif (ue_args.dropout_subs == 'all'):
        # NOTE(review): hard-codes model.electra -- only ELECTRA-style models
        # support the 'all' substitution mode here.
        convert_to_mc_dropout(model.electra.encoder, {'Dropout': dropout_ctor})
    else:
        raise ValueError(f'Wrong ue args {ue_args.dropout_subs}')
|
def calculate_dropouts(model):
    """Recursively count the `Dropout` modules contained in `model`."""
    res = 0
    # Leaf Dropout layers count as one; everything else is searched recursively.
    # (The original also fetched the child's attribute name via enumerate and
    # model._modules, but never used it -- removed.)
    for layer in model.children():
        if (layer._get_name() == 'Dropout'):
            res += 1
        else:
            res += calculate_dropouts(model=layer)
    return res
|
def freeze_all_dpp_dropouts(model, freeze):
    """Recursively freeze (or unfreeze) the mask of every DropoutDPP layer."""
    for child in model.children():
        if not isinstance(child, DropoutDPP):
            # Not a DPP dropout: recurse into the subtree.
            freeze_all_dpp_dropouts(model=child, freeze=freeze)
        elif freeze:
            child.mask.freeze(dry_run=True)
        else:
            child.mask.unfreeze(dry_run=True)
|
def compute_metrics(is_regression, metric, p: EvalPrediction):
    """Compute task metrics from an EvalPrediction.

    Uses squeezed raw scores for regression and argmax over classes otherwise;
    when several metrics are returned, adds their mean as 'combined_score'.
    """
    raw_preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    if is_regression:
        preds = np.squeeze(raw_preds)
    else:
        preds = np.argmax(raw_preds, axis=1)
    result = metric.compute(predictions=preds, references=p.label_ids)
    if len(result) > 1:
        result['combined_score'] = np.mean(list(result.values())).item()
    return result
|
def do_predict_eval(model, tokenizer, trainer, eval_dataset, train_dataset, metric, config, work_dir):
    """Evaluate the model deterministically, then run MC-dropout stochastic inference.

    Saves the deterministic predictions plus `committee_size` sampled
    prediction sets to `<work_dir>/dev_inference.json` (and uploads the file
    to wandb when a run is active).
    """
    log.info('*** Evaluate ***')
    training_args = config.training
    true_labels = [example.label for example in eval_dataset]
    tagger = TextClassifier(model, tokenizer, training_args=training_args, trainer=trainer)
    (preds, probs) = tagger.predict(eval_dataset)
    ue_args = config.ue
    eval_results = {}
    eval_results['true_labels'] = true_labels
    eval_results['probabilities'] = probs.tolist()
    eval_results['answers'] = preds.tolist()
    eval_results['sampled_probabilities'] = []
    eval_results['sampled_answers'] = []
    log.info('******Perform stochastic inference...*******')
    log.info('Model before dropout replacement:')
    log.info(str(model))
    # Swap the model's dropout layers for the configured stochastic variant.
    convert_dropouts(model, ue_args)
    log.info('Model after dropout replacement:')
    log.info(str(model))
    activate_mc_dropout(model, activate=True, random=ue_args.inference_prob)
    if (ue_args.dropout_type == 'DPP'):
        # DPP dropout needs one frozen-mask pass before the sampled runs.
        log.info('**************Dry run********************')
        freeze_all_dpp_dropouts(model, freeze=True)
        dry_run_dataset = (eval_dataset if (ue_args.dropout.dry_run_dataset == 'eval') else train_dataset)
        tagger.predict(dry_run_dataset)
        freeze_all_dpp_dropouts(model, freeze=False)
        log.info('Done.')
    log.info('****************Start runs**************')
    eval_metric = metric
    set_seed(config.seed)
    random.seed(config.seed)
    # Each pass samples a fresh dropout mask, giving one committee member.
    for i in tqdm(range(ue_args.committee_size)):
        (preds, probs) = tagger.predict(eval_dataset)
        eval_results['sampled_probabilities'].append(probs.tolist())
        eval_results['sampled_answers'].append(preds.tolist())
        if ue_args.eval_passes:
            eval_score = eval_metric.compute(predictions=preds, references=true_labels)
            log.info(f'Eval score: {eval_score}')
    log.info('Done.')
    # Restore deterministic inference before saving results.
    activate_mc_dropout(model, activate=False)
    with open((Path(work_dir) / 'dev_inference.json'), 'w') as res:
        json.dump(eval_results, res)
    if (wandb.run is not None):
        wandb.save((Path(work_dir) / 'dev_inference.json'))
|
def fix_task_name(task_name):
    """Map the legacy 'sst-2' task name to the datasets-style 'sst2'."""
    if task_name == 'sst-2':
        return 'sst2'
    return task_name
|
def train_eval_glue_model(config, training_args, data_args, work_dir):
    """Build tokenizer/model for a GLUE task, optionally train, then evaluate.

    Handles the 'mnli-mm' pseudo-task by training on 'mnli' and evaluating on
    the mismatched dev split, and optionally subsamples the training set.
    """
    ue_args = config.ue
    model_args = config.model
    log.info(f'Seed: {config.seed}')
    set_seed(config.seed)
    random.seed(config.seed)
    mnli_mm = False
    if (data_args.task_name == 'mnli-mm'):
        mnli_mm = True
        data_args.task_name = 'mnli'
    try:
        # stsb is the regression task; everything else is classification.
        is_regression = (data_args.task_name == 'stsb')
        if (not is_regression):
            num_labels = glue_tasks_num_labels[data_args.task_name]
        else:
            num_labels = 1
    except KeyError:
        raise ValueError(('Task not found: %s' % data_args.task_name))
    model_config = AutoConfig.from_pretrained(model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=config.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=config.cache_dir)
    if ue_args.use_cache:
        # Cached model variants are only implemented for ELECTRA and BERT.
        if ('electra' in model_args.model_name_or_path):
            model = ElectraForSequenceClassificationCached.from_pretrained(model_args.model_name_or_path, from_tf=False, config=model_config, cache_dir=config.cache_dir)
            model.use_cache = True
            model.classifier = ElectraClassificationHeadCustom(model.classifier)
            log.info("Replaced ELECTRA's head")
        elif ('bert' in model_args.model_name_or_path):
            model = BertForSequenceClassificationCached.from_pretrained(model_args.model_name_or_path, from_tf=False, config=model_config, cache_dir=config.cache_dir)
            model.use_cache = True
        else:
            raise ValueError(f'{model_args.model_name_or_path} does not have a cached option.')
    elif ('electra' in model_args.model_name_or_path):
        model = ElectraForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=False, config=model_config, cache_dir=config.cache_dir)
        model.classifier = ElectraClassificationHeadCustom(model.classifier)
        log.info("Replaced ELECTRA's head")
    else:
        model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=False, config=model_config, cache_dir=config.cache_dir)
    print(model)
    train_dataset = None
    # The train set is also needed (without training) when the DPP dry run
    # is configured to use it.
    if (config.do_train or ((config.ue.dropout_type == 'DPP') and (config.ue.dropout.dry_run_dataset != 'eval'))):
        train_dataset = GlueDataset(data_args, tokenizer=tokenizer, cache_dir=config.cache_dir)
    if (config.do_train and (config.data.subsample_perc > 0)):
        indexes = list(range(len(train_dataset)))
        train_indexes = random.sample(indexes, int((len(train_dataset) * config.data.subsample_perc)))
        train_dataset = torch.utils.data.Subset(train_dataset, train_indexes)
    if mnli_mm:
        # Restore the mismatched task name so the dev set is the mm split.
        data_args = dataclasses.replace(data_args, task_name='mnli-mm')
    eval_dataset = (GlueDataset(data_args, tokenizer=tokenizer, mode='dev', cache_dir=config.cache_dir) if config.do_eval else None)
    metric_task_name = ('sst2' if (data_args.task_name == 'sst-2') else data_args.task_name)
    metric = load_metric('glue', metric_task_name, keep_in_memory=True, cache_dir=config.cache_dir)
    metric_fn = (lambda p: compute_metrics(is_regression, metric, p))
    training_args.save_steps = 0
    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=metric_fn)
    if config.do_train:
        trainer.train(model_path=(model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None))
        trainer.save_model(work_dir)
        if trainer.is_world_master():
            tokenizer.save_pretrained(work_dir)
    if config.do_eval:
        do_predict_eval(model, tokenizer, trainer, eval_dataset, train_dataset, metric, config, work_dir)
|
def update_config(cfg_old, cfg_new):
    """Copy into `cfg_old` every entry of `cfg_new` whose key already exists as an attribute; return `cfg_old`."""
    existing = cfg_old.__dict__
    for key, value in cfg_new.items():
        if key in existing:
            setattr(cfg_old, key, value)
    return cfg_old
|
@hydra.main(config_path=os.environ['HYDRA_CONFIG_PATH'])
def main(config):
    """Hydra entry point: set up wandb plus training/data args and run the GLUE pipeline."""
    os.environ['WANDB_WATCH'] = 'False'
    # Hydra switched cwd to the run directory; remember it, then switch back
    # so relative paths in the config resolve as written.
    auto_generated_dir = os.getcwd()
    log.info(f'Work dir: {auto_generated_dir}')
    os.chdir(hydra.utils.get_original_cwd())
    wandb_run = init_wandb(auto_generated_dir, config)
    # Overlay the hydra config sections onto the transformers argument objects.
    args_train = TrainingArguments(output_dir=auto_generated_dir)
    args_train = update_config(args_train, config.training)
    args_data = DataTrainingArguments(task_name=config.data.task_name, data_dir=config.data.data_dir)
    args_data = update_config(args_data, config.data)
    train_eval_glue_model(config, args_train, args_data, auto_generated_dir)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.