repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
AFC | AFC-master/inclearn/convnet/my_resnet_mtl.py | """Pytorch port of the resnet used for CIFAR100 by iCaRL.
https://github.com/srebuffi/iCaRL/blob/master/iCaRL-TheanoLasagne/utils_cifar100.py
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from inclearn.convnet.tools.conv_mtl import Conv2dMtl
from inclearn.lib import pooling
from torch.nn import init
logger = logging.getLogger(__name__)
class DownsampleStride(nn.Module):
    """Parameter-free downsampling by strided slicing.

    Keeps every ``n``-th element along the two trailing (spatial)
    dimensions; with the default ``n=2`` the resolution is halved.
    """

    def __init__(self, n=2):
        super(DownsampleStride, self).__init__()
        self._n = n

    def forward(self, x):
        # Bug fix: the slice step was hard-coded to 2, silently ignoring the
        # configured ``self._n``. Behavior is unchanged for the default n=2.
        return x[..., ::self._n, ::self._n]
class DownsampleConv(nn.Module):
    """Learned downsampling shortcut: stride-2 1x1 meta-conv followed by BN."""

    def __init__(self, inplanes, planes):
        super().__init__()
        projection = Conv2dMtl(inplanes, planes, stride=2, kernel_size=1, bias=False)
        self.conv = nn.Sequential(projection, nn.BatchNorm2d(planes))

    def forward(self, x):
        return self.conv(x)
class ResidualBlock(nn.Module):
    """Post-activation CIFAR residual block with optional 2x downsampling.

    When ``increase_dim`` is True the first convolution uses stride 2 and the
    channel count doubles; the shortcut is either a parameter-free strided
    slice with zero channel padding ("stride") or a learned 1x1 convolution.
    """

    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super(ResidualBlock, self).__init__()
        self.increase_dim = increase_dim
        # Doubling the width goes hand in hand with halving the resolution.
        planes = inplanes * 2 if increase_dim else inplanes
        first_stride = 2 if increase_dim else 1
        self.conv_a = Conv2dMtl(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_a = nn.BatchNorm2d(planes)
        self.conv_b = Conv2dMtl(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = nn.BatchNorm2d(planes)
        if increase_dim:
            if downsampling == "stride":
                # Parameter-free shortcut: strided slice + zero-padded channels.
                self.downsampler = DownsampleStride()
                self._need_pad = True
            else:
                # Learned shortcut already produces `planes` channels.
                self.downsampler = DownsampleConv(inplanes, planes)
                self._need_pad = False
        self.last_relu = last_relu

    @staticmethod
    def pad(x):
        # Double the channel count by concatenating an all-zero copy.
        zeros = x.mul(0)
        return torch.cat((x, zeros), 1)

    def forward(self, x):
        out = self.bn_a(self.conv_a(x))
        out = F.relu(out, inplace=True)
        out = self.bn_b(self.conv_b(out))
        if self.increase_dim:
            x = self.downsampler(x)
            if self._need_pad:
                x = self.pad(x)
        out = x + out
        if self.last_relu:
            out = F.relu(out, inplace=True)
        return out
class PreActResidualBlock(nn.Module):
    """Pre-activation residual block (BN -> ReLU -> conv ordering).

    Args:
        inplanes: number of input channels.
        increase_dim: if True, halve the spatial resolution (stride-2 first
            conv) and double the channel count, zero-padding the shortcut.
        last_relu: if True, apply a final ReLU to the block output.
        downsampling: accepted for signature compatibility with
            ``ResidualBlock`` (CifarResNet passes it to every block); only
            the parameter-free "stride" shortcut is implemented here.
    """
    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super().__init__()
        self.increase_dim = increase_dim
        if increase_dim:
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes
        self.bn_a = nn.BatchNorm2d(inplanes)
        self.conv_a = Conv2dMtl(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_b = nn.BatchNorm2d(planes)
        self.conv_b = Conv2dMtl(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        if increase_dim:
            self.downsample = DownsampleStride()
            # Double the shortcut's channels with zeros.
            self.pad = lambda x: torch.cat((x, x.mul(0)), 1)
        self.last_relu = last_relu

    def forward(self, x):
        y = self.bn_a(x)
        y = F.relu(y, inplace=True)
        # Bug fix: previously ``self.conv_a(x)`` convolved the raw input,
        # discarding the pre-activation (BN + ReLU) computed just above.
        y = self.conv_a(y)
        y = self.bn_b(y)
        y = F.relu(y, inplace=True)
        y = self.conv_b(y)
        if self.increase_dim:
            x = self.downsample(x)
            x = self.pad(x)
        y = x + y
        if self.last_relu:
            y = F.relu(y, inplace=True)
        return y
class Stage(nn.Module):
    """A sequence of blocks that also exposes every block's raw activation.

    Returns ``(per_block_outputs, final_output)``; the recorded activations
    are taken before the optional inter-block ReLU so they stay "raw".
    """

    def __init__(self, blocks, block_relu=False):
        super().__init__()
        self.blocks = nn.ModuleList(blocks)
        self.block_relu = block_relu

    def forward(self, x):
        per_block_outputs = []
        for block in self.blocks:
            x = block(x)
            # Keep the pre-ReLU activation for attention-style losses.
            per_block_outputs.append(x)
            if self.block_relu:
                x = F.relu(x)
        return per_block_outputs, x
class CifarResNet(nn.Module):
    """
    ResNet optimized for the Cifar Dataset, as specified in
    https://arxiv.org/abs/1512.03385.pdf

    Built from meta-transfer-learning convolutions (``Conv2dMtl``): a stem
    conv, three stages of residual blocks plus one final block, global
    pooling and an optional projection head. The forward pass also returns
    intermediate activations for attention-distillation losses.
    """

    def __init__(
        self,
        n=5,
        nf=16,
        channels=3,
        preact=False,
        zero_residual=True,
        pooling_config=None,
        downsampling="stride",
        final_layer=False,
        all_attentions=False,
        last_relu=False,
        **kwargs
    ):
        """ Constructor
        Args:
            n: number of residual blocks per stage (depth = 6n + 2).
            nf: base width (filters of the first stage).
            channels: number of input image channels.
            preact: use pre-activation blocks instead of post-activation.
            zero_residual: zero-init the last BN of each residual branch.
            pooling_config: dict choosing the global pooling ("avg"/"weldon");
                defaults to average pooling.
            downsampling: shortcut type for dimension-increasing blocks.
            final_layer: optional projection head after pooling (True/"conv"
                for a 1x1 conv, or a dict describing an MLP).
            all_attentions: return every block activation instead of only the
                last one of each stage.
            last_relu: apply ReLU between blocks.
        """
        if kwargs:
            raise ValueError("Unused kwargs: {}.".format(kwargs))
        # Bug fix: the default used to be the mutable dict literal
        # {"type": "avg"}, shared across every call; build it per-call.
        if pooling_config is None:
            pooling_config = {"type": "avg"}
        self.all_attentions = all_attentions
        logger.info("Downsampling type {}".format(downsampling))
        self._downsampling_type = downsampling
        self.last_relu = last_relu
        Block = ResidualBlock if not preact else PreActResidualBlock
        super(CifarResNet, self).__init__()
        self.conv_1_3x3 = Conv2dMtl(channels, nf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = nn.BatchNorm2d(nf)
        self.stage_1 = self._make_layer(Block, nf, increase_dim=False, n=n)
        self.stage_2 = self._make_layer(Block, nf, increase_dim=True, n=n - 1)
        self.stage_3 = self._make_layer(Block, 2 * nf, increase_dim=True, n=n - 2)
        self.stage_4 = Block(
            4 * nf, increase_dim=False, last_relu=False, downsampling=self._downsampling_type
        )
        if pooling_config["type"] == "avg":
            self.pool = nn.AdaptiveAvgPool2d((1, 1))
        elif pooling_config["type"] == "weldon":
            self.pool = pooling.WeldonPool2d(**pooling_config)
        else:
            raise ValueError("Unknown pooling type {}.".format(pooling_config["type"]))
        self.out_dim = 4 * nf
        if final_layer in (True, "conv"):
            self.final_layer = Conv2dMtl(self.out_dim, self.out_dim, kernel_size=1, bias=False)
        elif isinstance(final_layer, dict):
            if final_layer["type"] == "one_layer":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
                self.out_dim = int(self.out_dim * final_layer["reduction_factor"])
            elif final_layer["type"] == "two_layers":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, self.out_dim), nn.BatchNorm1d(self.out_dim),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
                self.out_dim = int(self.out_dim * final_layer["reduction_factor"])
            else:
                raise ValueError("Unknown final layer type {}.".format(final_layer["type"]))
        else:
            self.final_layer = None
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                # Meta-convs carry their own initialization scheme.
                m.reset_parameters()
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
        if zero_residual:
            # Zero-init the last BN of each residual branch so every block
            # starts as an identity mapping (https://arxiv.org/abs/1706.02677).
            for m in self.modules():
                if isinstance(m, ResidualBlock):
                    nn.init.constant_(m.bn_b.weight, 0)

    def _make_layer(self, Block, planes, increase_dim=False, n=None):
        """Build a stage: an optional dimension-increasing block followed by
        ``n`` width-preserving blocks."""
        layers = []
        if increase_dim:
            layers.append(
                Block(
                    planes,
                    increase_dim=True,
                    last_relu=self.last_relu,
                    downsampling=self._downsampling_type
                )
            )
            planes = 2 * planes
        for i in range(n):
            layers.append(
                Block(planes, last_relu=self.last_relu, downsampling=self._downsampling_type)
            )
        return Stage(layers, block_relu=self.last_relu)

    @property
    def last_conv(self):
        # Last convolution of the network (used e.g. for hooks).
        return self.stage_4.conv_b

    def forward(self, x):
        """Return pre-/post-ReLU pooled features and attention maps."""
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)
        feats_s1, x = self.stage_1(x)
        feats_s2, x = self.stage_2(x)
        feats_s3, x = self.stage_3(x)
        x = self.stage_4(x)
        # "raw_features" come from the pre-ReLU activation, "features" from
        # the rectified one.
        raw_features = self.end_features(x)
        features = self.end_features(F.relu(x, inplace=False))
        if self.all_attentions:
            attentions = [*feats_s1, *feats_s2, *feats_s3, x]
        else:
            attentions = [feats_s1[-1], feats_s2[-1], feats_s3[-1], x]
        return {"raw_features": raw_features, "features": features, "attention": attentions}

    def end_features(self, x):
        """Global pooling, flattening and optional projection head."""
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        if self.final_layer is not None:
            x = self.final_layer(x)
        return x

    def apply_mtl(self, b):
        """Toggle the meta-transfer-learning weight scaling on every meta-conv."""
        logger.info(f"Apply mtl: {b}.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.apply_mtl = b

    def apply_mtl_bias(self, b):
        """Toggle the meta-transfer-learning additive bias on every meta-conv."""
        logger.info(f"Apply mtl bias: {b}.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.apply_mtl_bias = b

    def apply_bias_on_weights(self, b):
        """Toggle applying the mtl bias directly on the weights."""
        logger.info(f"Apply mtl bias on weights: {b}.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.apply_bias_on_weights = b

    def fuse_mtl_weights(self):
        """Fold the mtl parameters into the base convolution weights."""
        logger.info("Fuse mtl weights.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.fuse_mtl_weights()

    def reset_mtl_parameters(self):
        """Re-initialize the mtl-specific parameters of every meta-conv."""
        logger.info("Reset mtl weights.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.reset_mtl_parameters()

    def freeze_convnet(self, freeze, bn_weights=False, bn_stats=False):
        """Freeze/unfreeze the backbone.

        Args:
            freeze: target frozen state for the meta-convolutions.
            bn_weights: also toggle requires_grad on BN affine parameters.
            bn_stats: when freezing, set BN layers to eval mode so running
                statistics stop updating.
        """
        logger.info(f"Freeze convnet: {freeze}.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.freeze_convnet(freeze)
            elif isinstance(m, nn.BatchNorm2d):
                if freeze and bn_stats:
                    m.eval()
                else:
                    m.train()
                if bn_weights:
                    m.weight.requires_grad = not freeze
                    m.bias.requires_grad = not freeze
def resnet_rebuffi(n=5, **kwargs):
    """Build the CIFAR ResNet of Rebuffi et al. (depth = 6n + 2)."""
    return CifarResNet(n=n, **kwargs)
| 10,928 | 29.190608 | 97 | py |
AFC | AFC-master/inclearn/convnet/resnet.py | """Taken & slightly modified from:
* https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.nn import functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
# URLs of the official torchvision ImageNet-pretrained checkpoints.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding (bias-free, as BatchNorm follows)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (bias-free, as BatchNorm follows)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with an identity (or projected) shortcut.

    ``last_relu=False`` skips the trailing ReLU so the caller can read the
    raw residual output.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last_relu=True):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.last_relu = last_relu

    def forward(self, x):
        # Project the shortcut only when resolution/width changes.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out) if self.last_relu else out
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand bottleneck with residual shortcut."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, last_relu=True):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.last_relu = last_relu

    def forward(self, x):
        # Project the shortcut only when resolution/width changes.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out) if self.last_relu else out
class ResNet(nn.Module):
    """Standard ResNet backbone adapted for incremental learning.

    Differences from torchvision: no classification head, configurable base
    width ``nf``, optional inter-stage ReLUs (``last_relu``), and the forward
    pass returns pre-/post-ReLU pooled features plus the per-stage activation
    maps used for attention distillation.
    """
    def __init__(
        self,
        block,
        layers,
        zero_init_residual=True,
        nf=16,
        last_relu=False,
        initial_kernel=3,
        **kwargs
    ):
        super(ResNet, self).__init__()
        self.last_relu = last_relu
        self.inplanes = nf
        # NOTE(review): padding stays fixed at 1, so initial_kernel != 3
        # changes the spatial size of the stem output — confirm intended.
        self.conv1 = nn.Conv2d(3, nf, kernel_size=initial_kernel, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(nf)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 1 * nf, layers[0])
        self.layer2 = self._make_layer(block, 2 * nf, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 4 * nf, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 8 * nf, layers[3], stride=2, last=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Feature dimension exposed to downstream classifiers.
        self.out_dim = 8 * nf * block.expansion
        print("Features dimension is {}.".format(self.out_dim))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, last=False):
        """Build one stage; the stage's last block (or every block when
        ``last``) skips its trailing ReLU so raw activations can be read."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when the resolution or width changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            if i == blocks - 1 or last:
                layers.append(block(self.inplanes, planes, last_relu=False))
            else:
                layers.append(block(self.inplanes, planes, last_relu=self.last_relu))
        return nn.Sequential(*layers)
    @property
    def last_block(self):
        # Final stage module, exposed for hooks / inspection.
        return self.layer4
    @property
    def last_conv(self):
        # NOTE(review): assumes BasicBlock stages; for Bottleneck the last
        # convolution would be conv3 — confirm callers only use BasicBlock.
        return self.layer4[-1].conv2
    def forward(self, x):
        """Return pre-/post-ReLU pooled features and per-stage activations."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x_1 = self.layer1(x)
        x_2 = self.layer2(self.end_relu(x_1))
        x_3 = self.layer3(self.end_relu(x_2))
        x_4 = self.layer4(self.end_relu(x_3))
        # "raw_features" are pooled from the pre-ReLU activation.
        raw_features = self.end_features(x_4)
        features = self.end_features(F.relu(x_4, inplace=False))
        return {
            "raw_features": raw_features,
            "features": features,
            "attention": [x_1, x_2, x_3, x_4]
        }
    def end_features(self, x):
        """Global average pooling followed by flattening."""
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x
    def end_relu(self, x):
        # Inter-stage ReLU, applied only when configured: the stages end
        # without ReLU so the recorded activations stay raw.
        if hasattr(self, "last_relu") and self.last_relu:
            return F.relu(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 backbone.

    Args:
        pretrained: if True, load ImageNet-pretrained weights (the original
            fully-connected head is discarded, as this backbone has none).
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if not pretrained:
        return model
    print("Loading pretrained network")
    state_dict = model_zoo.load_url(model_urls['resnet18'])
    # Drop the classification head: this backbone does not define one.
    del state_dict["fc.weight"]
    del state_dict["fc.bias"]
    model.load_state_dict(state_dict)
    return model
def resnet32(**kwargs):
    """Construct a shallow ResNet variant with stage depths [5, 4, 3, 2]."""
    return ResNet(BasicBlock, [5, 4, 3, 2], **kwargs)
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet34'])
        # Bug fix: this backbone has no "fc" head, so the pretrained
        # classifier weights must be dropped before loading (as resnet18 and
        # resnet101 already do); otherwise strict state-dict loading raises
        # on the unexpected keys.
        del state_dict["fc.weight"]
        del state_dict["fc.bias"]
        model.load_state_dict(state_dict)
    return model
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet50'])
        # Bug fix: drop the pretrained classifier head before loading — this
        # backbone has no "fc" module, so strict loading would raise on the
        # unexpected keys (resnet18/resnet101 already delete them).
        del state_dict["fc.weight"]
        del state_dict["fc.bias"]
        model.load_state_dict(state_dict)
    return model
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 backbone.

    Args:
        pretrained: if True, load ImageNet-pretrained weights (the original
            fully-connected head is discarded, as this backbone has none).
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return model
    print("Loading pretrained network")
    checkpoint = model_zoo.load_url(model_urls['resnet101'])
    # Drop the classification head: this backbone does not define one.
    del checkpoint["fc.weight"]
    del checkpoint["fc.bias"]
    model.load_state_dict(checkpoint)
    return model
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet152'])
        # Bug fix: drop the pretrained classifier head before loading — this
        # backbone has no "fc" module, so strict loading would raise on the
        # unexpected keys (resnet18/resnet101 already delete them).
        del state_dict["fc.weight"]
        del state_dict["fc.bias"]
        model.load_state_dict(state_dict)
    return model
| 8,824 | 30.294326 | 106 | py |
AFC | AFC-master/inclearn/convnet/vgg.py | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = [
'VGG',
'vgg11',
'vgg11_bn',
'vgg13',
'vgg13_bn',
'vgg16',
'vgg16_bn',
'vgg19_bn',
'vgg19',
]
# URLs of the official torchvision ImageNet-pretrained VGG checkpoints.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
    """VGG feature extractor: conv features, 7x7 average pool, single FC.

    The classifier is reduced to one 4096-d linear layer; the forward pass
    returns a dict matching the other backbones in this package.
    """

    def __init__(self, features, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(nn.Linear(512 * 7 * 7, 4096))
        if init_weights:
            self._initialize_weights()
        self.out_dim = 4096

    def forward(self, x):
        feature_maps = self.features(x)
        pooled = self.avgpool(feature_maps)
        embedding = self.classifier(torch.flatten(pooled, 1))
        # VGG has no separate raw/attention outputs; mirror the ResNet API.
        return {"raw_features": embedding, "features": embedding, "attention": None}

    def _initialize_weights(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def make_layers(cfg, batch_norm=False):
    """Translate a VGG configuration list into an ``nn.Sequential``.

    Each integer entry adds a 3x3 convolution (optionally followed by
    BatchNorm) plus ReLU; the sentinel 'M' inserts a 2x2 max pooling.
    """
    layers = []
    in_channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.ReLU(inplace=True))
        in_channels = spec
    return nn.Sequential(*layers)
# VGG layer configurations: integers are 3x3-conv output channels, 'M' marks
# a 2x2 max pooling. 'A'=VGG11, 'B'=VGG13, 'D'=VGG16, 'E'=VGG19.
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E':
    [
        64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512,
        512, 512, 'M'
    ],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
    """Instantiate a VGG variant, optionally loading ImageNet weights.

    The pretrained classifier layers beyond the first are dropped, since
    this VGG keeps only a single linear layer.
    """
    if pretrained:
        # Pretrained checkpoints supply their own initialization.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if not pretrained:
        return model
    state_dict = model_zoo.load_url(model_urls[arch], progress=progress)
    # Remove the fully-connected layers this trimmed VGG does not have.
    for key in ("classifier.3.weight", "classifier.3.bias",
                "classifier.6.weight", "classifier.6.bias"):
        del state_dict[key]
    model.load_state_dict(state_dict)
    return model
def vgg11(pretrained=False, progress=True, **kwargs):
    """Build VGG-11 (configuration "A", no batch norm).

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition",
    https://arxiv.org/pdf/1409.1556.pdf

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
    """Build VGG-11 (configuration "A") with batch normalization.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition",
    https://arxiv.org/pdf/1409.1556.pdf

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
    """Build VGG-13 (configuration "B", no batch norm).

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition",
    https://arxiv.org/pdf/1409.1556.pdf

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
    """Build VGG-13 (configuration "B") with batch normalization.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition",
    https://arxiv.org/pdf/1409.1556.pdf

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
    """Build VGG-16 (configuration "D", no batch norm).

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition",
    https://arxiv.org/pdf/1409.1556.pdf

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
    """Build VGG-16 (configuration "D") with batch normalization.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition",
    https://arxiv.org/pdf/1409.1556.pdf

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
    """Build VGG-19 (configuration "E", no batch norm).

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition",
    https://arxiv.org/pdf/1409.1556.pdf

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
    """Build VGG-19 (configuration "E") with batch normalization.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition",
    https://arxiv.org/pdf/1409.1556.pdf

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
| 7,286 | 36.953125 | 113 | py |
AFC | AFC-master/inclearn/convnet/densenet.py | import re
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
# URLs of the official torchvision ImageNet-pretrained DenseNet checkpoints.
# NOTE(review): only referenced by the (stubbed) pretrained-loading path.
model_urls = {
    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
    'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
    'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
    'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1,
bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1,
bias=False)),
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate,
training=self.training)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
    """Stack of densely connected layers; channel count grows linearly."""

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for index in range(num_layers):
            # Each layer sees the block input plus all previous outputs.
            in_features = num_input_features + index * growth_rate
            layer = _DenseLayer(in_features, growth_rate, bn_size, drop_rate)
            self.add_module('denselayer%d' % (index + 1), layer)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
          NOTE(review): unused — the classifier head was removed and this
          class only produces features; confirm intended.
    """
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000,
                 **kwargs):
        super(DenseNet, self).__init__()
        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
                                padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate,
                                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            # A transition (1x1 conv + pool) halves the channels between
            # dense blocks, except after the last block.
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        # Feature dimension exposed to downstream classifiers.
        self.out_dim = num_features
        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        """Return globally average-pooled, flattened features (no logits)."""
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
        return out
def _load_state_dict(model, model_url, progress):
    # NOTE(review): stub — pretrained weights are never actually loaded, so
    # the ``pretrained`` flag of the densenet builders has no effect;
    # confirm this is intended.
    pass
def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,
              **kwargs):
    """Build a DenseNet variant and optionally request pretrained weights.

    NOTE(review): ``_load_state_dict`` is currently a stub, so the
    ``pretrained`` flag has no effect — confirm this is intended.
    """
    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    if pretrained:
        _load_state_dict(model, model_urls[arch], progress)
    return model
def densenet121(pretrained=False, progress=True, **kwargs):
    """Build Densenet-121 ("Densely Connected Convolutional Networks",
    https://arxiv.org/pdf/1608.06993.pdf).

    Args:
        pretrained: if True, request ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, **kwargs)
def densenet161(pretrained=False, progress=True, **kwargs):
    """Build Densenet-161 ("Densely Connected Convolutional Networks",
    https://arxiv.org/pdf/1608.06993.pdf).

    Args:
        pretrained: if True, request ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, **kwargs)
def densenet169(pretrained=False, progress=True, **kwargs):
    """Build Densenet-169 ("Densely Connected Convolutional Networks",
    https://arxiv.org/pdf/1608.06993.pdf).

    Args:
        pretrained: if True, request ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, **kwargs)
def densenet201(pretrained=False, progress=True, **kwargs):
    """Build Densenet-201 ("Densely Connected Convolutional Networks",
    https://arxiv.org/pdf/1608.06993.pdf).

    Args:
        pretrained: if True, request ImageNet-pretrained weights.
        progress: if True, show a download progress bar on stderr.
    """
    return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, **kwargs)
AFC | AFC-master/inclearn/convnet/resnet_mtl.py | """Taken & slightly modified from:
* https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import logging
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from inclearn.convnet.tools.conv_mtl import Conv2dMtl
from torch.nn import functional as F
logger = logging.getLogger(__name__)
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
# URLs of the official torchvision ImageNet-pretrained checkpoints.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 meta-transfer convolution with padding (bias-free)."""
    return Conv2dMtl(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 meta-transfer convolution (bias-free)."""
    return Conv2dMtl(
        in_planes, out_planes, kernel_size=1, stride=stride, bias=False
    )
class BasicBlock(nn.Module):
    """Two 3x3 meta-convolutions with an identity (or projected) shortcut.

    ``last_relu=False`` skips the trailing ReLU so the caller can read the
    raw residual output.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last_relu=True):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.last_relu = last_relu

    def forward(self, x):
        # Project the shortcut only when resolution/width changes.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out) if self.last_relu else out
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand meta-conv bottleneck with shortcut."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, last_relu=True):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.last_relu = last_relu

    def forward(self, x):
        # Project the shortcut only when resolution/width changes.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out) if self.last_relu else out
class ResNet(nn.Module):
    """ResNet backbone whose convolutions are all ``Conv2dMtl`` modules.

    The mtl-specific methods (``apply_mtl``, ``fuse_mtl_weights``, ...)
    broadcast a toggle/operation to every ``Conv2dMtl`` in the network.
    ``forward`` returns a dict with pre-ReLU pooled features
    (``raw_features``), post-ReLU pooled features (``features``) and the four
    stage outputs (``attention``) for attention-based distillation.
    """

    def __init__(self, block, layers, zero_init_residual=True, nf=16, last_relu=False, **kwargs):
        # NOTE(review): any extra **kwargs are silently ignored.
        super(ResNet, self).__init__()
        self.last_relu = last_relu
        self.inplanes = nf
        # 3x3 stride-1 stem: sized for small (CIFAR-like) inputs.
        self.conv1 = Conv2dMtl(3, nf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(nf)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 1 * nf, layers[0])
        self.layer2 = self._make_layer(block, 2 * nf, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 4 * nf, layers[2], stride=2)
        # `last=True`: every non-first block of layer4 skips its final ReLU.
        self.layer4 = self._make_layer(block, 8 * nf, layers[3], stride=2, last=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.out_dim = 8 * nf * block.expansion
        print("Features dimension is {}.".format(self.out_dim))

        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, last=False):
        """Stack ``blocks`` residual blocks; the stage's final block (or every
        non-first block when ``last`` is set) skips its trailing ReLU."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            if i == blocks - 1 or last:
                layers.append(block(self.inplanes, planes, last_relu=False))
            else:
                layers.append(block(self.inplanes, planes, last_relu=self.last_relu))

        return nn.Sequential(*layers)

    @property
    def last_block(self):
        # Exposed for distillation hooks on the deepest stage.
        return self.layer4

    @property
    def last_conv(self):
        # Last 3x3 conv of the network (BasicBlock layout assumed here).
        return self.layer4[-1].conv2

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        # Stage outputs arrive un-activated; `end_relu` re-applies ReLU
        # between stages only when `last_relu` is enabled.
        x_1 = self.layer1(x)
        x_2 = self.layer2(self.end_relu(x_1))
        x_3 = self.layer3(self.end_relu(x_2))
        x_4 = self.layer4(self.end_relu(x_3))

        raw_features = self.end_features(x_4)
        features = self.end_features(F.relu(x_4, inplace=False))

        return {
            "raw_features": raw_features,
            "features": features,
            "attention": [x_1, x_2, x_3, x_4]
        }

    def end_features(self, x):
        """Global-average-pool and flatten to (batch, out_dim)."""
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x

    def end_relu(self, x):
        """Apply ReLU between stages only when configured to do so."""
        if hasattr(self, "last_relu") and self.last_relu:
            return F.relu(x)
        return x

    def apply_mtl(self, b):
        """Enable/disable the mtl scaling on every Conv2dMtl."""
        logger.info(f"Apply mtl: {b}.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.apply_mtl = b

    def apply_mtl_bias(self, b):
        """Enable/disable the mtl bias on every Conv2dMtl."""
        logger.info(f"Apply mtl bias: {b}.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.apply_mtl_bias = b

    def apply_bias_on_weights(self, b):
        """Enable/disable applying the mtl bias directly on the weights."""
        logger.info(f"Apply mtl bias on weights: {b}.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.apply_bias_on_weights = b

    def fuse_mtl_weights(self):
        """Fold the learned mtl parameters into the base conv weights."""
        logger.info("Fuse mtl weights.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.fuse_mtl_weights()

    def reset_mtl_parameters(self):
        """Re-initialize the mtl parameters of every Conv2dMtl."""
        logger.info("Reset mtl weights.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.reset_mtl_parameters()

    def freeze_convnet(self, freeze, bn_weights=False, bn_stats=False):
        """(Un)freeze convolutions; optionally freeze BN affine params/stats.

        NOTE(review): when ``freeze`` is False the BN layers are forced into
        train mode regardless of the module's current mode — confirm intended.
        """
        logger.info(f"Freeze convnet: {freeze}.")
        for m in self.modules():
            if isinstance(m, Conv2dMtl):
                m.freeze_convnet(freeze)
            elif isinstance(m, nn.BatchNorm2d):
                if freeze and bn_stats:
                    m.eval()
                else:
                    m.train()
                if bn_weights:
                    m.weight.requires_grad = not freeze
                    m.bias.requires_grad = not freeze
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # BUG FIX: `model_zoo` is not imported at module level in this file,
        # so the original raised NameError whenever pretrained=True.
        from torch.utils import model_zoo

        # NOTE(review): the checkpoint contains plain-Conv2d (and fc) keys
        # while this model uses Conv2dMtl modules — loading may need key
        # filtering / strict=False; verify against Conv2dMtl's state_dict.
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model
def resnet32(**kwargs):
    """ResNet-32-style network with per-stage block counts [5, 4, 3, 2]."""
    return ResNet(BasicBlock, [5, 4, 3, 2], **kwargs)
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): `model_zoo` is not imported in this module, so
    pretrained=True raises NameError — confirm and add the import.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return model

def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): see resnet34 — `model_zoo` is not imported in this module.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model

def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): see resnet34 — `model_zoo` is not imported in this module.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model

def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): see resnet34 — `model_zoo` is not imported in this module.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model
| 9,970 | 30.85623 | 106 | py |
AFC | AFC-master/inclearn/convnet/my_resnet_brn.py | ''' Incremental-Classifier Learning
Authors : Khurram Javed, Muhammad Talha Paracha
Maintainer : Khurram Javed
Lab : TUKL-SEECS R&D Lab
Email : 14besekjaved@seecs.edu.pk '''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from inclearn.lib import pooling
class BatchRenormalization2D(nn.Module):
    """Batch Renormalization over NCHW inputs (appears to follow Ioffe 2017,
    https://arxiv.org/abs/1702.03275 — verify).

    During training the batch statistics are corrected towards the running
    averages via the clamped factors ``r`` (scale) and ``d`` (shift), whose
    clamp bounds ramp up with the number of samples seen.

    NOTE(review): ``running_avg_mean`` is initialised to ones and
    ``running_avg_std`` to zeros — the conventional init is the reverse
    (mean=0, std=1); a zero running std risks division by zero on the first
    eval-mode forward. Confirm whether this is intentional.
    NOTE(review): the running averages are plain tensor attributes, not
    registered buffers, so they are excluded from ``state_dict`` and are not
    moved by ``.to(device)`` (hence the manual ``.to(device)`` calls below).
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.01, r_d_max_inc_step=0.0001):
        super(BatchRenormalization2D, self).__init__()

        self.eps = eps
        self.momentum = torch.tensor((momentum), requires_grad=False)

        # Learnable per-channel affine parameters (broadcast over N, H, W).
        self.gamma = torch.nn.Parameter(torch.ones((1, num_features, 1, 1)), requires_grad=True)
        self.beta = torch.nn.Parameter(torch.zeros((1, num_features, 1, 1)), requires_grad=True)

        self.running_avg_mean = torch.ones((1, num_features, 1, 1), requires_grad=False)
        self.running_avg_std = torch.zeros((1, num_features, 1, 1), requires_grad=False)

        # r/d clamp bounds grow from (1, 0) towards these maxima as
        # training progresses, easing from plain BN towards renormalization.
        self.max_r_max = 3.0
        self.max_d_max = 5.0

        self.r_max_inc_step = r_d_max_inc_step
        self.d_max_inc_step = r_d_max_inc_step

        self.r_max = torch.tensor((1.0), requires_grad=False)
        self.d_max = torch.tensor((0.0), requires_grad=False)

    def forward(self, x):
        device = self.gamma.device

        # Per-channel batch statistics (std clamped away from zero).
        batch_ch_mean = torch.mean(x, dim=(0, 2, 3), keepdim=True).to(device)
        batch_ch_std = torch.clamp(torch.std(x, dim=(0, 2, 3), keepdim=True), self.eps,
                                   1e10).to(device)

        self.running_avg_std = self.running_avg_std.to(device)
        self.running_avg_mean = self.running_avg_mean.to(device)
        self.momentum = self.momentum.to(device)

        self.r_max = self.r_max.to(device)
        self.d_max = self.d_max.to(device)

        if self.training:
            # `.data` detaches r and d: they act as constants w.r.t. autograd.
            r = torch.clamp(batch_ch_std / self.running_avg_std, 1.0 / self.r_max,
                            self.r_max).to(device).data.to(device)
            d = torch.clamp(
                (batch_ch_mean - self.running_avg_mean) / self.running_avg_std, -self.d_max,
                self.d_max
            ).to(device).data.to(device)

            x = ((x - batch_ch_mean) * r) / batch_ch_std + d
            x = self.gamma * x + self.beta

            # Ramp the clamp bounds with the number of samples seen.
            if self.r_max < self.max_r_max:
                self.r_max += self.r_max_inc_step * x.shape[0]

            if self.d_max < self.max_d_max:
                self.d_max += self.d_max_inc_step * x.shape[0]

        else:
            # Inference: normalize with the running averages only.
            x = (x - self.running_avg_mean) / self.running_avg_std
            x = self.gamma * x + self.beta

        # Running averages updated on every forward (train and eval) —
        # NOTE(review): confirm updating during eval is intended.
        self.running_avg_mean = self.running_avg_mean + self.momentum * (
            batch_ch_mean.data.to(device) - self.running_avg_mean
        )
        self.running_avg_std = self.running_avg_std + self.momentum * (
            batch_ch_std.data.to(device) - self.running_avg_std
        )

        return x
class DownsampleStride(nn.Module):
    """Parameter-free spatial downsampling via strided slicing."""

    def __init__(self, n=2):
        super(DownsampleStride, self).__init__()
        self._n = n

    def forward(self, x):
        # Keep every second row and column. Note the slice step is
        # hard-coded to 2; `self._n` is stored but not consulted.
        return x[..., ::2, ::2]
class DownsampleConv(nn.Module):
    """Learned shortcut: stride-2 1x1 conv + batch renormalization.

    Used instead of strided slicing + zero-padding when the shortcut must
    also change the channel count with learned weights.
    """

    def __init__(self, inplanes, planes):
        super().__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(inplanes, planes, stride=2, kernel_size=1, bias=False),
            BatchRenormalization2D(planes),
        )

    def forward(self, x):
        return self.conv(x)
class ResidualBlock(nn.Module):
    """Post-activation residual block built on BatchRenormalization2D.

    Returns the residual *sum without a final ReLU*; callers decide whether
    to activate between blocks.
    """

    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last=False, downsampling="stride"):
        super(ResidualBlock, self).__init__()

        self.increase_dim = increase_dim

        if increase_dim:
            # Halve spatial size, double channels.
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_a = BatchRenormalization2D(planes)

        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = BatchRenormalization2D(planes)

        if increase_dim:
            if downsampling == "stride":
                self.downsampler = DownsampleStride()
                # Strided slicing keeps the channel count, so zero-pad the
                # shortcut up to `planes` channels.
                self.downsample = lambda x: self.pad(self.downsampler(x))
            else:
                self.downsample = DownsampleConv(inplanes, planes)

        # NOTE(review): `last` is stored but never used in `forward`.
        self.last = last

    @staticmethod
    def pad(x):
        # Double the channel count by concatenating an all-zero copy.
        return torch.cat((x, x.mul(0)), 1)

    def forward(self, x):
        y = self.conv_a(x)
        y = self.bn_a(y)
        y = F.relu(y, inplace=True)

        y = self.conv_b(y)
        y = self.bn_b(y)

        if self.increase_dim:
            x = self.downsample(x)

        y = x + y

        return y
class PreActResidualBlock(nn.Module):
    """Pre-activation residual block (BN -> ReLU -> conv, twice).

    Fixes two defects of the original:
    * BUG FIX: the first convolution consumed the raw input ``x`` instead of
      the pre-activated ``y``, silently discarding ``bn_a``/ReLU (and leaving
      ``bn_a`` untrained).
    * ``downsampling`` is now accepted (default "stride"), matching the
      kwarg that ``CifarResNet._make_layer`` forwards to its block class;
      previously preact mode raised TypeError.
    """

    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last=False, downsampling="stride"):
        super().__init__()

        self.increase_dim = increase_dim

        if increase_dim:
            # Halve spatial size, double channels.
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.bn_a = BatchRenormalization2D(inplanes)
        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_b = BatchRenormalization2D(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        if increase_dim:
            if downsampling == "stride":
                # Strided slicing keeps channels: zero-pad the shortcut.
                self.downsampler = DownsampleStride()
                self.pad = lambda x: torch.cat((x, x.mul(0)), 1)
                self.downsample = lambda x: self.pad(self.downsampler(x))
            else:
                self.downsample = DownsampleConv(inplanes, planes)

        self.last = last

    def forward(self, x):
        y = self.bn_a(x)
        y = F.relu(y, inplace=True)
        # BUG FIX: feed the pre-activated tensor, not the raw input.
        y = self.conv_a(y)

        y = self.bn_b(y)
        y = F.relu(y, inplace=True)
        y = self.conv_b(y)

        if self.increase_dim:
            x = self.downsample(x)

        y = x + y

        return y
class CifarResNet(nn.Module):
    """
    ResNet optimized for the Cifar Dataset, as specified in
    https://arxiv.org/abs/1512.03385.pdf

    Batch-renormalization variant: all normalization layers are
    BatchRenormalization2D. ``forward`` returns ``(raw_features, features)``
    where ``raw_features`` are pooled pre-ReLU activations of stage 4.
    """

    def __init__(
        self,
        n=5,
        nf=16,
        channels=3,
        preact=False,
        zero_residual=True,
        pooling_config={"type": "avg"},
        downsampling="stride",
        final_layer=False,
        **kwargs
    ):
        """ Constructor
        Args:
            depth: number of layers.
            num_classes: number of classes
            base_width: base width

        NOTE(review): `zero_residual` is accepted but never used here.
        NOTE(review): `_make_layer` forwards a `downsampling` kwarg to the
        block class — confirm `PreActResidualBlock` accepts it before using
        preact=True.
        """
        if kwargs:
            raise ValueError("Unused kwargs: {}.".format(kwargs))

        print("Downsampling type", downsampling)
        self._downsampling_type = downsampling

        Block = ResidualBlock if not preact else PreActResidualBlock

        super(CifarResNet, self).__init__()

        self.conv_1_3x3 = nn.Conv2d(channels, nf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = BatchRenormalization2D(nf)

        # Three stages (16 -> 32 -> 64 channels for nf=16) plus a single
        # extra block (stage_4) whose pre-ReLU output feeds `raw_features`.
        self.stage_1 = self._make_layer(Block, nf, increase_dim=False, n=n)
        self.stage_2 = self._make_layer(Block, nf, increase_dim=True, n=n - 1)
        self.stage_3 = self._make_layer(Block, 2 * nf, increase_dim=True, n=n - 2)
        self.stage_4 = Block(
            4 * nf, increase_dim=False, last=True, downsampling=self._downsampling_type
        )

        if pooling_config["type"] == "avg":
            self.pool = nn.AvgPool2d(8)
        elif pooling_config["type"] == "weldon":
            self.pool = pooling.WeldonPool2d(**pooling_config)
        else:
            raise ValueError("Unknown pooling type {}.".format(pooling_config["type"]))

        self.out_dim = 4 * nf
        if final_layer in (True, "conv"):
            self.final_layer = nn.Conv2d(self.out_dim, self.out_dim, kernel_size=1, bias=False)
        elif isinstance(final_layer, dict):
            if final_layer["type"] == "bn_relu_fc":
                # NOTE(review): this branch shrinks the feature dimension by
                # `reduction_factor` but `self.out_dim` is NOT updated —
                # downstream consumers of out_dim may be wrong. Verify.
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
            else:
                raise ValueError("Unknown final layer type {}.".format(final_layer["type"]))
        else:
            self.final_layer = lambda x: x

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, BatchRenormalization2D):
                nn.init.constant_(m.gamma, 1)
                nn.init.constant_(m.beta, 0)

    def _make_layer(self, Block, planes, increase_dim=False, last=False, n=None):
        # NOTE(review): `last` is accepted but unused.
        layers = []

        if increase_dim:
            layers.append(Block(planes, increase_dim=True, downsampling=self._downsampling_type))
            planes = 2 * planes

        for i in range(n):
            layers.append(Block(planes, downsampling=self._downsampling_type))

        return nn.Sequential(*layers)

    def forward(self, x, attention_hook=False):
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)

        x_s1 = self.stage_1(x)
        x_s2 = self.stage_2(x_s1)
        x_s3 = self.stage_3(x_s2)
        x_s4 = self.stage_4(x_s3)

        # Pre-ReLU vs post-ReLU pooled features.
        raw_features = self.end_features(x_s4)
        features = self.end_features(F.relu(x_s4, inplace=False))

        if attention_hook:
            return raw_features, features, [x_s1, x_s2, x_s3, x_s4]
        return raw_features, features

    def end_features(self, x):
        """Pool, flatten and (optionally) project the stage-4 output."""
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.final_layer(x)

        return x
def resnet_rebuffi(n=5, **kwargs):
    """CIFAR ResNet of Rebuffi et al., with ``n`` blocks in the first stage."""
    return CifarResNet(n=n, **kwargs)
| 9,785 | 29.58125 | 97 | py |
AFC | AFC-master/inclearn/convnet/resnet_importance.py | """Taken & slightly modified from:
* https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.nn import functional as F
import torch
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
# URLs of the official torchvision ImageNet-pretrained checkpoints,
# consumed by the `pretrained=True` factory functions below.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def gradhook(self, grad_input, grad_output):
    """Backward hook accumulating a per-channel importance estimate.

    Importance = squared output gradient, summed over spatial dims and
    averaged over the batch, accumulated into ``self.importance`` (a
    ``Channel_Importance_Measure`` buffer). Registered/removed via
    ``ResNet.start_cal_importance`` / ``stop_cal_importance``.
    """
    importance = grad_output[0] ** 2  # [N, C, H, W]
    if len(importance.shape) == 4:
        importance = torch.sum(importance, 3)  # [N, C, H]
        importance = torch.sum(importance, 2)  # [N, C]
    importance = torch.mean(importance, 0)  # [C]
    self.importance += importance
class Channel_Importance_Measure(nn.Module):
    """Identity-like per-channel scaling used to probe channel importance.

    ``scale`` is frozen at 1.0 (not trained), so the forward pass leaves the
    input unchanged; the ``importance`` buffer is filled by a backward hook
    attached externally.
    """

    def __init__(self, num_channels):
        super().__init__()
        self.num_channels = num_channels
        self.scale = nn.Parameter(torch.randn(num_channels), requires_grad=False)
        nn.init.constant_(self.scale, 1.0)
        self.register_buffer('importance', torch.zeros_like(self.scale))

    def forward(self, x):
        # Broadcast the per-channel scale over NCHW or NC inputs.
        broadcast_shape = [1, -1, 1, 1] if len(x.shape) == 4 else [1, -1]
        return x * self.scale.reshape(broadcast_shape)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-conv residual block.

    ``last_relu=False`` returns the pre-activation residual sum so the
    final block of a stage can expose un-activated features.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last_relu=True):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.last_relu = last_relu

    def forward(self, x):
        # Projection shortcut when shape changes, identity otherwise.
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut

        return self.relu(out) if self.last_relu else out
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1).

    Output has ``planes * expansion`` channels; ``last_relu=False`` skips
    the trailing ReLU so pre-activation features can be exposed.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, last_relu=True):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        # Spatial stride lives on the middle 3x3 conv.
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.last_relu = last_relu

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        # Projection shortcut when shape changes, identity otherwise.
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        if self.last_relu:
            out = self.relu(out)

        return out
class ResNet(nn.Module):
    """ResNet instrumented with per-stage channel-importance probes.

    A ``Channel_Importance_Measure`` (an identity scaling layer) follows
    each stage and the pooled features; ``start_cal_importance`` attaches
    backward hooks that accumulate squared output gradients into each
    probe's ``importance`` buffer (see ``gradhook``).

    NOTE(review): the probe widths (nf, 2nf, 4nf, 8nf) assume
    ``block.expansion == 1`` (BasicBlock); with Bottleneck the stage outputs
    have ``expansion *`` more channels — verify before using Bottleneck.
    """

    def __init__(
        self,
        block,
        layers,
        zero_init_residual=True,
        nf=16,
        last_relu=False,
        initial_kernel=3,
        **kwargs
    ):
        super(ResNet, self).__init__()
        self.last_relu = last_relu
        self.inplanes = nf
        # NOTE(review): padding is fixed to 1 regardless of `initial_kernel`,
        # so kernels other than 3 change the spatial size — confirm intended.
        self.conv1 = nn.Conv2d(3, nf, kernel_size=initial_kernel, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(nf)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 1 * nf, layers[0])
        self.stage_1_importance = Channel_Importance_Measure(nf)
        self.layer2 = self._make_layer(block, 2 * nf, layers[1], stride=2)
        self.stage_2_importance = Channel_Importance_Measure(2*nf)
        self.layer3 = self._make_layer(block, 4 * nf, layers[2], stride=2)
        self.stage_3_importance = Channel_Importance_Measure(4*nf)
        self.layer4 = self._make_layer(block, 8 * nf, layers[3], stride=2, last=True)
        self.stage_4_importance = Channel_Importance_Measure(8*nf)
        self.raw_features_importance = Channel_Importance_Measure(8*nf)
        # Handles of the registered backward hooks (None when inactive).
        self._hook = None

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.out_dim = 8 * nf * block.expansion
        print("Features dimension is {}.".format(self.out_dim))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, last=False):
        """Stack ``blocks`` residual blocks; the stage's final block (or
        every non-first block when ``last``) skips its trailing ReLU."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            if i == blocks - 1 or last:
                layers.append(block(self.inplanes, planes, last_relu=False))
            else:
                layers.append(block(self.inplanes, planes, last_relu=self.last_relu))

        return nn.Sequential(*layers)

    @property
    def last_block(self):
        # Exposed for distillation hooks on the deepest stage.
        return self.layer4

    @property
    def last_conv(self):
        # Last 3x3 conv of the network (BasicBlock layout assumed here).
        return self.layer4[-1].conv2

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        # Each stage output passes through its importance probe (identity
        # in the forward pass, gradient-tap in the backward pass).
        x_1 = self.layer1(x)
        x_1 = self.stage_1_importance(x_1)
        x_2 = self.layer2(self.end_relu(x_1))
        x_2 = self.stage_2_importance(x_2)
        x_3 = self.layer3(self.end_relu(x_2))
        x_3 = self.stage_3_importance(x_3)
        x_4 = self.layer4(self.end_relu(x_3))
        x_4 = self.stage_4_importance(x_4)

        raw_features = self.end_features(x_4)
        raw_features = self.raw_features_importance(raw_features)
        features = self.end_features(F.relu(x_4, inplace=False))

        importance = [self.stage_1_importance.importance,
                      self.stage_2_importance.importance,
                      self.stage_3_importance.importance,
                      self.stage_4_importance.importance,
                      self.raw_features_importance.importance]

        return {
            "raw_features": raw_features,
            "features": features,
            "attention": [x_1, x_2, x_3, x_4],
            "importance": importance
        }

    def start_cal_importance(self):
        """Attach the gradient hooks that accumulate channel importance.

        NOTE(review): ``register_backward_hook`` is deprecated in recent
        PyTorch in favour of ``register_full_backward_hook`` — verify against
        the targeted torch version.
        """
        self._hook = [self.stage_1_importance.register_backward_hook(gradhook),
                      self.stage_2_importance.register_backward_hook(gradhook),
                      self.stage_3_importance.register_backward_hook(gradhook),
                      self.stage_4_importance.register_backward_hook(gradhook),
                      self.raw_features_importance.register_backward_hook(gradhook)]

    def reset_importance(self):
        """Zero all accumulated importance buffers in place."""
        self.stage_1_importance.importance.zero_()
        self.stage_2_importance.importance.zero_()
        self.stage_3_importance.importance.zero_()
        self.stage_4_importance.importance.zero_()
        self.raw_features_importance.importance.zero_()

    def normalize_importance(self):
        """Rescale each probe's importance so its per-stage mean is 1."""
        total_importance = torch.mean(self.stage_1_importance.importance)
        self.stage_1_importance.importance = self.stage_1_importance.importance/total_importance
        total_importance = torch.mean(self.stage_2_importance.importance)
        self.stage_2_importance.importance = self.stage_2_importance.importance/total_importance
        total_importance = torch.mean(self.stage_3_importance.importance)
        self.stage_3_importance.importance = self.stage_3_importance.importance/total_importance
        total_importance = torch.mean(self.stage_4_importance.importance)
        self.stage_4_importance.importance = self.stage_4_importance.importance/total_importance
        total_importance = torch.mean(self.raw_features_importance.importance)
        self.raw_features_importance.importance = self.raw_features_importance.importance/total_importance

    def stop_cal_importance(self):
        """Detach the gradient hooks installed by ``start_cal_importance``."""
        for hook in self._hook:
            hook.remove()
        self._hook = None

    def end_features(self, x):
        """Global-average-pool and flatten to (batch, out_dim)."""
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x

    def end_relu(self, x):
        """Apply ReLU between stages only when configured to do so."""
        if hasattr(self, "last_relu") and self.last_relu:
            return F.relu(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        print("Loading pretrained network")
        state_dict = model_zoo.load_url(model_urls['resnet18'])
        # Drop the classifier weights: this backbone has no `fc` module.
        del state_dict["fc.weight"]
        del state_dict["fc.bias"]
        model.load_state_dict(state_dict)
    return model

def resnet32(**kwargs):
    """ResNet-32-style network with per-stage block counts [5, 4, 3, 2]."""
    model = ResNet(BasicBlock, [5, 4, 3, 2], **kwargs)
    return model

def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): unlike resnet18/resnet101, the `fc.*` keys are NOT removed
    before loading — loading will likely fail on unexpected keys; verify.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return model

def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): `fc.*` keys are not removed here (cf. resnet18) — verify.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model

def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        print("Loading pretrained network")
        state_dict = model_zoo.load_url(model_urls['resnet101'])
        # Drop the classifier weights: this backbone has no `fc` module.
        del state_dict["fc.weight"]
        del state_dict["fc.bias"]
        model.load_state_dict(state_dict)
    #model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model

def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): `fc.*` keys are not removed here (cf. resnet18) — verify.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model
| 12,403 | 33.648045 | 106 | py |
AFC | AFC-master/inclearn/convnet/cifar_resnet.py | ''' Incremental-Classifier Learning
Authors : Khurram Javed, Muhammad Talha Paracha
Maintainer : Khurram Javed
Lab : TUKL-SEECS R&D Lab
Email : 14besekjaved@seecs.edu.pk '''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class DownsampleA(nn.Module):
    """Option-A shortcut: stride-2 average pooling, then zero-pad channels.

    Doubles the channel count by concatenating an all-zero copy, so it is
    parameter-free (only ``stride == 2`` is supported).
    """

    def __init__(self, nIn, nOut, stride):
        super(DownsampleA, self).__init__()
        assert stride == 2
        # kernel_size=1 + stride=2 simply picks every second pixel.
        self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)

    def forward(self, x):
        pooled = self.avg(x)
        return torch.cat((pooled, pooled.mul(0)), 1)
class ResNetBasicblock(nn.Module):
    """Two-conv post-activation residual block with a final ReLU."""

    expansion = 1
    """
    RexNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
    """

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(ResNetBasicblock, self).__init__()

        self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn_a = nn.BatchNorm2d(planes)

        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = nn.BatchNorm2d(planes)
        self.downsample = downsample
        # NOTE(review): `featureSize` is set but not used in this class.
        self.featureSize = 64

    def forward(self, x):
        residual = x

        basicblock = self.conv_a(x)
        basicblock = self.bn_a(basicblock)
        basicblock = F.relu(basicblock, inplace=True)

        basicblock = self.conv_b(basicblock)
        basicblock = self.bn_b(basicblock)

        # Shortcut projection (DownsampleA) only when shape changes.
        if self.downsample is not None:
            residual = self.downsample(x)

        return F.relu(residual + basicblock, inplace=True)
class CifarResNet(nn.Module):
    """
    ResNet optimized for the Cifar Dataset, as specified in
    https://arxiv.org/abs/1512.03385.pdf

    ``forward`` returns the pooled, flattened feature vector (no classifier
    head is attached here despite the ``num_classes`` argument).
    """

    def __init__(self, block, depth, num_classes, channels=3):
        """ Constructor
        Args:
            block: residual block class (e.g. ResNetBasicblock).
            depth: number of layers; must satisfy (depth - 2) % 6 == 0.
            num_classes: number of classes (stored, not used by forward).
            channels: number of input channels (3 for CIFAR, 1 for MNIST).
        """
        super(CifarResNet, self).__init__()

        self.featureSize = 64
        # Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
        assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
        layer_blocks = (depth - 2) // 6

        self.num_classes = num_classes

        self.conv_1_3x3 = nn.Conv2d(channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = nn.BatchNorm2d(16)

        self.inplanes = 16
        self.stage_1 = self._make_layer(block, 16, layer_blocks, 1)
        self.stage_2 = self._make_layer(block, 32, layer_blocks, 2)
        self.stage_3 = self._make_layer(block, 64, layer_blocks, 2)
        self.avgpool = nn.AvgPool2d(8)
        self.out_dim = 64 * block.expansion

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Manual He (fan_out) initialization.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                # m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # FIX: `init.kaiming_normal` is deprecated/removed in modern
                # PyTorch; use the in-place `kaiming_normal_` instead.
                init.kaiming_normal_(m.weight)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` blocks; first one may downsample via option A."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x, feature=False, T=1, labels=False, scale=None, keep=None):
        # NOTE(review): `feature`, `T`, `labels`, `scale`, `keep` are accepted
        # for interface compatibility but unused here.
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)
        x = self.stage_1(x)
        x = self.stage_2(x)
        x = self.stage_3(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x

    def forwardFeature(self, x):
        # Placeholder kept for interface compatibility.
        pass
def resnet20(num_classes=10):
    """Constructs a ResNet-20 model for CIFAR-10 (by default)
    Args:
        num_classes (uint): number of classes
    """
    model = CifarResNet(ResNetBasicblock, 20, num_classes)
    return model

def resnet10mnist(num_classes=10):
    """Constructs a depth-10 model for MNIST (1 input channel).

    Args:
        num_classes (uint): number of classes

    NOTE(review): depth 10 violates CifarResNet's `(depth - 2) % 6 == 0`
    assertion, so this factory raises AssertionError as written — verify.
    """
    model = CifarResNet(ResNetBasicblock, 10, num_classes, 1)
    return model

def resnet20mnist(num_classes=10):
    """Constructs a ResNet-20 model for MNIST (1 input channel).

    Args:
        num_classes (uint): number of classes
    """
    model = CifarResNet(ResNetBasicblock, 20, num_classes, 1)
    return model

def resnet32mnist(num_classes=10, channels=1):
    """Constructs a ResNet-32 model for MNIST-like inputs."""
    model = CifarResNet(ResNetBasicblock, 32, num_classes, channels)
    return model

def resnet32(num_classes=10):
    """Constructs a ResNet-32 model for CIFAR-10 (by default)
    Args:
        num_classes (uint): number of classes
    """
    model = CifarResNet(ResNetBasicblock, 32, num_classes)
    return model

def resnet44(num_classes=10):
    """Constructs a ResNet-44 model for CIFAR-10 (by default)
    Args:
        num_classes (uint): number of classes
    """
    model = CifarResNet(ResNetBasicblock, 44, num_classes)
    return model

def resnet56(num_classes=10):
    """Constructs a ResNet-56 model for CIFAR-10 (by default)
    Args:
        num_classes (uint): number of classes
    """
    model = CifarResNet(ResNetBasicblock, 56, num_classes)
    return model

def resnet110(num_classes=10):
    """Constructs a ResNet-110 model for CIFAR-10 (by default)
    Args:
        num_classes (uint): number of classes
    """
    model = CifarResNet(ResNetBasicblock, 110, num_classes)
    return model
| 5,946 | 29.035354 | 102 | py |
AFC | AFC-master/inclearn/convnet/my_resnet.py | """Pytorch port of the resnet used for CIFAR100 by iCaRL.
https://github.com/srebuffi/iCaRL/blob/master/iCaRL-TheanoLasagne/utils_cifar100.py
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from inclearn.lib import pooling
logger = logging.getLogger(__name__)
class DownsampleStride(nn.Module):
    """Spatial downsampling by strided slicing; has no parameters."""

    def __init__(self, n=2):
        super(DownsampleStride, self).__init__()
        self._n = n

    def forward(self, x):
        # Keep every second row/column (step hard-coded to 2;
        # `self._n` is stored but not consulted by the slice).
        return x[..., ::2, ::2]
class DownsampleConv(nn.Module):
    """Learned shortcut: stride-2 1x1 convolution followed by batch norm."""

    def __init__(self, inplanes, planes):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(inplanes, planes, stride=2, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes),
        )

    def forward(self, x):
        return self.conv(x)
class ResidualBlock(nn.Module):
    """Post-activation residual block of the Rebuffi CIFAR ResNet.

    ``increase_dim`` doubles the channels and halves the spatial size; the
    shortcut is then either a strided slice + zero-pad ("stride") or a
    learned DownsampleConv. ``last_relu`` gates the final activation.
    """

    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super(ResidualBlock, self).__init__()

        self.increase_dim = increase_dim

        if increase_dim:
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_a = nn.BatchNorm2d(planes)

        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = nn.BatchNorm2d(planes)

        if increase_dim:
            if downsampling == "stride":
                self.downsampler = DownsampleStride()
                self._need_pad = True
            else:
                self.downsampler = DownsampleConv(inplanes, planes)
                self._need_pad = False

        self.last_relu = last_relu

    @staticmethod
    def pad(x):
        # Double the channel count with an all-zero copy.
        return torch.cat((x, x.mul(0)), 1)

    def forward(self, x):
        out = F.relu(self.bn_a(self.conv_a(x)), inplace=True)
        out = self.bn_b(self.conv_b(out))

        if self.increase_dim:
            shortcut = self.downsampler(x)
            if self._need_pad:
                shortcut = self.pad(shortcut)
        else:
            shortcut = x

        out = shortcut + out
        return F.relu(out, inplace=True) if self.last_relu else out
class PreActResidualBlock(nn.Module):
    """Pre-activation residual block (BN -> ReLU -> conv, twice).

    Fixes two defects of the original:
    * BUG FIX: the first convolution consumed the raw input ``x`` instead of
      the pre-activated ``y``, silently discarding ``bn_a``/ReLU (so ``bn_a``
      received no gradient at all).
    * ``downsampling`` is now accepted (default "stride"), matching the kwarg
      that ``CifarResNet._make_layer`` forwards to its block class;
      previously preact mode raised TypeError.
    """

    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super().__init__()

        self.increase_dim = increase_dim

        if increase_dim:
            # Halve spatial size, double channels.
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.bn_a = nn.BatchNorm2d(inplanes)
        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_b = nn.BatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        if increase_dim:
            if downsampling == "stride":
                # Strided slicing keeps channels: zero-pad the shortcut.
                self.downsample = DownsampleStride()
                self._need_pad = True
            else:
                self.downsample = DownsampleConv(inplanes, planes)
                self._need_pad = False
            self.pad = lambda x: torch.cat((x, x.mul(0)), 1)

        self.last_relu = last_relu

    def forward(self, x):
        y = self.bn_a(x)
        y = F.relu(y, inplace=True)
        # BUG FIX: feed the pre-activated tensor, not the raw input.
        y = self.conv_a(y)

        y = self.bn_b(y)
        y = F.relu(y, inplace=True)
        y = self.conv_b(y)

        if self.increase_dim:
            x = self.downsample(x)
            if self._need_pad:
                x = self.pad(x)

        y = x + y

        if self.last_relu:
            y = F.relu(y, inplace=True)
        return y
class Stage(nn.Module):
    """Sequence of blocks that also exposes every block's raw output.

    With ``block_relu`` each block's (un-activated) output is recorded, then
    ReLU-ed before being fed to the next block.
    """

    def __init__(self, blocks, block_relu=False):
        super().__init__()
        self.blocks = nn.ModuleList(blocks)
        self.block_relu = block_relu

    def forward(self, x):
        per_block_outputs = []
        out = x
        for block in self.blocks:
            out = block(out)
            per_block_outputs.append(out)
            if self.block_relu:
                out = F.relu(out)

        return per_block_outputs, out
class CifarResNet(nn.Module):
    """
    ResNet optimized for the Cifar Dataset, as specified in
    https://arxiv.org/abs/1512.03385.pdf

    ``forward`` returns a dict with:
        raw_features: pooled features of the last block (pre-ReLU),
        features:     pooled features after a ReLU,
        attention:    list of intermediate activation maps.
    """

    def __init__(
        self,
        n=5,
        nf=16,
        channels=3,
        preact=False,
        zero_residual=True,
        pooling_config=None,
        downsampling="stride",
        final_layer=False,
        all_attentions=False,
        last_relu=False,
        **kwargs
    ):
        """Constructor.

        Args:
            n: number of residual blocks per stage.
            nf: base number of convolutional filters.
            channels: number of input image channels.
            preact: use pre-activation residual blocks.
            zero_residual: zero-init the last BN weight of each residual branch.
            pooling_config: dict with at least a "type" key ("avg" or "weldon").
                Defaults to {"type": "avg"}; a None sentinel replaces the
                previous shared-mutable-default-argument pitfall.
            downsampling: "stride" (parameter-free shortcut) or a learned conv.
            final_layer: optional head after pooling (False, True/"conv", or a
                dict with "type" and "reduction_factor").
            all_attentions: return every block activation, not one per stage.
            last_relu: apply a ReLU between consecutive blocks of a stage.
        """
        super(CifarResNet, self).__init__()

        if kwargs:
            raise ValueError("Unused kwargs: {}.".format(kwargs))
        if pooling_config is None:
            pooling_config = {"type": "avg"}

        self.all_attentions = all_attentions
        logger.info("Downsampling type {}".format(downsampling))
        self._downsampling_type = downsampling
        self.last_relu = last_relu

        Block = ResidualBlock if not preact else PreActResidualBlock

        self.conv_1_3x3 = nn.Conv2d(channels, nf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = nn.BatchNorm2d(nf)

        self.stage_1 = self._make_layer(Block, nf, increase_dim=False, n=n)
        self.stage_2 = self._make_layer(Block, nf, increase_dim=True, n=n - 1)
        self.stage_3 = self._make_layer(Block, 2 * nf, increase_dim=True, n=n - 2)
        self.stage_4 = Block(
            4 * nf, increase_dim=False, last_relu=False, downsampling=self._downsampling_type
        )

        if pooling_config["type"] == "avg":
            self.pool = nn.AdaptiveAvgPool2d((1, 1))
        elif pooling_config["type"] == "weldon":
            self.pool = pooling.WeldonPool2d(**pooling_config)
        else:
            raise ValueError("Unknown pooling type {}.".format(pooling_config["type"]))

        self.out_dim = 4 * nf
        if final_layer in (True, "conv"):
            self.final_layer = nn.Conv2d(self.out_dim, self.out_dim, kernel_size=1, bias=False)
        elif isinstance(final_layer, dict):
            if final_layer["type"] == "one_layer":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
                self.out_dim = int(self.out_dim * final_layer["reduction_factor"])
            elif final_layer["type"] == "two_layers":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, self.out_dim), nn.BatchNorm1d(self.out_dim),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
                self.out_dim = int(self.out_dim * final_layer["reduction_factor"])
            else:
                raise ValueError("Unknown final layer type {}.".format(final_layer["type"]))
        else:
            self.final_layer = None

        # Standard He initialization for convs/linears; unit BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")

        if zero_residual:
            # Zero-init the last BN of each block so every residual branch
            # starts as a no-op (the block is ~identity at initialization).
            for m in self.modules():
                if isinstance(m, ResidualBlock):
                    nn.init.constant_(m.bn_b.weight, 0)

    def _make_layer(self, Block, planes, increase_dim=False, n=None):
        """Build one stage: optional dimension-increasing block followed by n blocks."""
        layers = []

        if increase_dim:
            layers.append(
                Block(
                    planes,
                    increase_dim=True,
                    last_relu=False,
                    downsampling=self._downsampling_type
                )
            )
            planes = 2 * planes

        for _ in range(n):
            layers.append(Block(planes, last_relu=False, downsampling=self._downsampling_type))

        return Stage(layers, block_relu=self.last_relu)

    @property
    def last_conv(self):
        """Last convolution of the network (useful for hooks)."""
        return self.stage_4.conv_b

    def forward(self, x):
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)

        feats_s1, x = self.stage_1(x)
        feats_s2, x = self.stage_2(x)
        feats_s3, x = self.stage_3(x)
        x = self.stage_4(x)

        raw_features = self.end_features(x)
        features = self.end_features(F.relu(x, inplace=False))

        if self.all_attentions:
            attentions = [*feats_s1, *feats_s2, *feats_s3, x]
        else:
            attentions = [feats_s1[-1], feats_s2[-1], feats_s3[-1], x]

        return {"raw_features": raw_features, "features": features, "attention": attentions}

    def end_features(self, x):
        """Pool, flatten, and optionally project the feature map."""
        x = self.pool(x)
        x = x.view(x.size(0), -1)

        if self.final_layer is not None:
            x = self.final_layer(x)

        return x
def resnet_rebuffi(n=5, **kwargs):
    """Factory for the CIFAR ResNet parameterization used by Rebuffi et al. (iCaRL)."""
    model = CifarResNet(n=n, **kwargs)
    return model
| 9,282 | 28.469841 | 97 | py |
AFC | AFC-master/inclearn/convnet/my_resnet_mcbn.py | """Pytorch port of the resnet used for CIFAR100 by iCaRL.
https://github.com/srebuffi/iCaRL/blob/master/iCaRL-TheanoLasagne/utils_cifar100.py
"""
import logging
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from inclearn.lib import pooling
logger = logging.getLogger(__name__)
class DownsampleStride(nn.Module):
    """Spatially subsample an NCHW tensor by keeping every second row/column."""

    def __init__(self, n=2):
        super(DownsampleStride, self).__init__()
        # NOTE(review): the forward pass hard-codes a stride of 2; ``_n`` is
        # stored for API compatibility but not used.
        self._n = n

    def forward(self, x):
        return x[..., ::2, ::2]
class DownsampleConv(nn.Module):
    """Learned shortcut downsampling: 1x1 stride-2 convolution + MC batch norm."""

    def __init__(self, inplanes, planes):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(inplanes, planes, stride=2, kernel_size=1, bias=False),
            MCBatchNorm2d(planes),
        )

    def forward(self, x):
        return self.conv(x)
class MCBatchNorm2d(nn.Module):
    """BatchNorm2d wrapper supporting Monte-Carlo-style statistics replay.

    Modes:
      * "normal":   behave as a plain BatchNorm2d.
      * "record":   behave as a plain BatchNorm2d, but additionally store the
                    incoming batch's (mean, var) for later reuse.
      * "sampling": normalize with one randomly chosen recorded statistics
                    pair instead of the wrapped BN's running statistics.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.bn = nn.BatchNorm2d(*args, **kwargs)
        self.recorded_means = []
        self.recorded_vars = []
        self.eps = 1e-8
        self._mode = "normal"

    def clear_records(self):
        """Drop all recorded statistics."""
        self.recorded_means = []
        self.recorded_vars = []

    def record_mode(self):
        self._mode = "record"

    def normal_mode(self):
        self._mode = "normal"

    def sampling_mode(self):
        self._mode = "sampling"

    def forward(self, x):
        if self._mode == "record":
            with torch.no_grad():
                self.recorded_means.append(x.mean([0, 2, 3]))
                self.recorded_vars.append(x.var([0, 2, 3], unbiased=False))

        if self._mode in ("normal", "record"):
            return self.bn(x)

        # Sampling mode: normalize with one randomly picked recorded pair,
        # then apply the wrapped BN's learned affine transform.
        idx = random.randint(0, len(self.recorded_means) - 1)
        mean = self.recorded_means[idx][None, :, None, None]
        var = self.recorded_vars[idx][None, :, None, None]

        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        return x_hat * self.bn.weight[None, :, None, None] + self.bn.bias[None, :, None, None]
class ResidualBlock(nn.Module):
    """Basic two-conv residual block using MCBatchNorm2d normalization."""

    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super(ResidualBlock, self).__init__()

        self.increase_dim = increase_dim
        if increase_dim:
            first_stride, planes = 2, inplanes * 2
        else:
            first_stride, planes = 1, inplanes

        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_a = MCBatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = MCBatchNorm2d(planes)

        if increase_dim:
            if downsampling == "stride":
                # Parameter-free shortcut: subsample, then zero-pad channels.
                self.downsampler = DownsampleStride()
                self._need_pad = True
            else:
                self.downsampler = DownsampleConv(inplanes, planes)
                self._need_pad = False

        self.last_relu = last_relu

    @staticmethod
    def pad(x):
        """Double the channel count by concatenating a zero tensor."""
        return torch.cat((x, x.mul(0)), 1)

    def forward(self, x):
        out = self.conv_a(x)
        out = F.relu(self.bn_a(out), inplace=True)
        out = self.bn_b(self.conv_b(out))

        shortcut = x
        if self.increase_dim:
            shortcut = self.downsampler(shortcut)
            if self._need_pad:
                shortcut = self.pad(shortcut)

        out = shortcut + out
        if self.last_relu:
            out = F.relu(out, inplace=True)
        return out
class PreActResidualBlock(nn.Module):
    """Pre-activation residual block with MCBatchNorm2d normalization.

    Fixes:
      * ``conv_a`` is now applied to the pre-activated tensor instead of the
        raw input; previously the first BN/ReLU result was computed and then
        discarded.
      * Accepts (and ignores) a ``downsampling`` keyword for drop-in
        compatibility with ``ResidualBlock`` in ``CifarResNet``.
    """
    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super().__init__()

        self.increase_dim = increase_dim
        if increase_dim:
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.bn_a = MCBatchNorm2d(inplanes)
        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_b = MCBatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        if increase_dim:
            # Parameter-free shortcut: stride-2 subsampling + zero channel padding.
            self.downsample = DownsampleStride()
            self.pad = lambda x: torch.cat((x, x.mul(0)), 1)

        self.last_relu = last_relu

    def forward(self, x):
        y = self.bn_a(x)
        y = F.relu(y, inplace=True)
        y = self.conv_a(y)  # BUGFIX: was self.conv_a(x), discarding the pre-activation.

        y = self.bn_b(y)
        y = F.relu(y, inplace=True)
        y = self.conv_b(y)

        if self.increase_dim:
            x = self.downsample(x)
            x = self.pad(x)

        y = x + y

        if self.last_relu:
            y = F.relu(y, inplace=True)
        return y
class Stage(nn.Module):
    """A sequence of blocks that also exposes every intermediate activation.

    ``block_relu`` inserts a ReLU between consecutive blocks; the recorded
    intermediate features are taken *before* that ReLU.
    """

    def __init__(self, blocks, block_relu=False):
        super().__init__()
        self.blocks = nn.ModuleList(blocks)
        self.block_relu = block_relu

    def forward(self, x):
        features = []
        for block in self.blocks:
            x = block(x)
            features.append(x)
            if self.block_relu:
                x = F.relu(x)
        return features, x
class CifarResNet(nn.Module):
    """
    ResNet optimized for the Cifar Dataset, as specified in
    https://arxiv.org/abs/1512.03385.pdf

    MCBN variant: every normalization layer is an ``MCBatchNorm2d`` so batch
    statistics can be recorded and replayed; the ``clear_records`` /
    ``record_mode`` / ``normal_mode`` / ``sampling_mode`` methods below fan
    the mode change out to every normalization layer of the network.
    """
    def __init__(
        self,
        n=5,
        nf=16,
        channels=3,
        preact=False,
        zero_residual=True,
        pooling_config={"type": "avg"},
        downsampling="stride",
        final_layer=False,
        all_attentions=False,
        last_relu=False,
        **kwargs
    ):
        """ Constructor
        Args:
            n: number of residual blocks per stage.
            nf: base number of convolutional filters.
            channels: number of input image channels.
            preact: use pre-activation residual blocks.
            zero_residual: zero-init the last BN weight of each residual branch.
            pooling_config: {"type": "avg"} or {"type": "weldon", ...}.
            downsampling: "stride" (parameter-free shortcut) or a learned conv.
            final_layer: optional head after pooling (False, True/"conv", or a
                dict with "type" and "reduction_factor").
            all_attentions: return every block activation, not one per stage.
            last_relu: apply a ReLU between consecutive blocks of a stage.
        """
        if kwargs:
            raise ValueError("Unused kwargs: {}.".format(kwargs))
        self.all_attentions = all_attentions
        logger.info("Downsampling type {}".format(downsampling))
        self._downsampling_type = downsampling
        self.last_relu = last_relu
        Block = ResidualBlock if not preact else PreActResidualBlock
        super(CifarResNet, self).__init__()
        self.conv_1_3x3 = nn.Conv2d(channels, nf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = MCBatchNorm2d(nf)
        # Three stages (widths nf, 2nf, 4nf) plus one final standalone block.
        self.stage_1 = self._make_layer(Block, nf, increase_dim=False, n=n)
        self.stage_2 = self._make_layer(Block, nf, increase_dim=True, n=n - 1)
        self.stage_3 = self._make_layer(Block, 2 * nf, increase_dim=True, n=n - 2)
        self.stage_4 = Block(
            4 * nf, increase_dim=False, last_relu=False, downsampling=self._downsampling_type
        )
        if pooling_config["type"] == "avg":
            self.pool = nn.AdaptiveAvgPool2d((1, 1))
        elif pooling_config["type"] == "weldon":
            self.pool = pooling.WeldonPool2d(**pooling_config)
        else:
            raise ValueError("Unknown pooling type {}.".format(pooling_config["type"]))
        self.out_dim = 4 * nf
        if final_layer in (True, "conv"):
            self.final_layer = nn.Conv2d(self.out_dim, self.out_dim, kernel_size=1, bias=False)
        elif isinstance(final_layer, dict):
            if final_layer["type"] == "one_layer":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
                self.out_dim = int(self.out_dim * final_layer["reduction_factor"])
            elif final_layer["type"] == "two_layers":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, self.out_dim), nn.BatchNorm1d(self.out_dim),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
                self.out_dim = int(self.out_dim * final_layer["reduction_factor"])
            else:
                raise ValueError("Unknown final layer type {}.".format(final_layer["type"]))
        else:
            self.final_layer = None
        # He init for convs/linears; the BN wrapped inside MCBatchNorm2d is
        # reset to weight=1, bias=0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, MCBatchNorm2d):
                nn.init.constant_(m.bn.weight, 1)
                nn.init.constant_(m.bn.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
        if zero_residual:
            # Zero-init the second BN of each block so every residual branch
            # starts as a no-op (the block is ~identity at initialization).
            for m in self.modules():
                if isinstance(m, ResidualBlock):
                    nn.init.constant_(m.bn_b.bn.weight, 0)
    def _make_layer(self, Block, planes, increase_dim=False, n=None):
        """Build one stage: optional dimension-increasing block + n blocks."""
        layers = []
        if increase_dim:
            layers.append(
                Block(
                    planes,
                    increase_dim=True,
                    last_relu=False,
                    downsampling=self._downsampling_type
                )
            )
            planes = 2 * planes
        for i in range(n):
            layers.append(Block(planes, last_relu=False, downsampling=self._downsampling_type))
        return Stage(layers, block_relu=self.last_relu)
    @property
    def last_conv(self):
        # Last convolution of the network (useful e.g. for feature hooks).
        return self.stage_4.conv_b
    def forward(self, x):
        """Return a dict of pre-ReLU/post-ReLU pooled features and attention maps."""
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)
        feats_s1, x = self.stage_1(x)
        feats_s2, x = self.stage_2(x)
        feats_s3, x = self.stage_3(x)
        x = self.stage_4(x)
        raw_features = self.end_features(x)
        features = self.end_features(F.relu(x, inplace=False))
        if self.all_attentions:
            attentions = [*feats_s1, *feats_s2, *feats_s3, x]
        else:
            attentions = [feats_s1[-1], feats_s2[-1], feats_s3[-1], x]
        return {"raw_features": raw_features, "features": features, "attention": attentions}
    def end_features(self, x):
        # Pool, flatten, and optionally project.
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        if self.final_layer is not None:
            x = self.final_layer(x)
        return x
    def clear_records(self):
        """Drop all recorded batch statistics in every MCBatchNorm2d."""
        self.bn_1.clear_records()
        self.stage_4.bn_a.clear_records()
        self.stage_4.bn_b.clear_records()
        for stage in [self.stage_1, self.stage_2, self.stage_3]:
            for block in stage.blocks:
                block.bn_a.clear_records()
                block.bn_b.clear_records()
    def record_mode(self):
        """Make every MCBatchNorm2d store incoming batch statistics."""
        self.bn_1.record_mode()
        self.stage_4.bn_a.record_mode()
        self.stage_4.bn_b.record_mode()
        for stage in [self.stage_1, self.stage_2, self.stage_3]:
            for block in stage.blocks:
                block.bn_a.record_mode()
                block.bn_b.record_mode()
    def normal_mode(self):
        """Restore plain BatchNorm behavior in every MCBatchNorm2d."""
        self.bn_1.normal_mode()
        self.stage_4.bn_a.normal_mode()
        self.stage_4.bn_b.normal_mode()
        for stage in [self.stage_1, self.stage_2, self.stage_3]:
            for block in stage.blocks:
                block.bn_a.normal_mode()
                block.bn_b.normal_mode()
    def sampling_mode(self):
        """Make every MCBatchNorm2d normalize with randomly sampled recorded stats."""
        self.bn_1.sampling_mode()
        self.stage_4.bn_a.sampling_mode()
        self.stage_4.bn_b.sampling_mode()
        for stage in [self.stage_1, self.stage_2, self.stage_3]:
            for block in stage.blocks:
                block.bn_a.sampling_mode()
                block.bn_b.sampling_mode()
def resnet_rebuffi(n=5, **kwargs):
    """Factory for the CIFAR ResNet parameterization used by Rebuffi et al. (iCaRL)."""
    model = CifarResNet(n=n, **kwargs)
    return model
| 11,898 | 28.972292 | 98 | py |
AFC | AFC-master/inclearn/convnet/my_resnet_importance.py | """Pytorch port of the resnet used for CIFAR100 by iCaRL.
https://github.com/srebuffi/iCaRL/blob/master/iCaRL-TheanoLasagne/utils_cifar100.py
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from inclearn.lib import pooling
import pdb
logger = logging.getLogger(__name__)
def gradhook(self, grad_input, grad_output):
    """Backward hook accumulating per-channel squared-gradient importance.

    ``grad_output[0]`` is [N, C] or [N, C, H, W]; spatial dimensions are
    summed and the batch dimension averaged before adding the result into
    ``self.importance`` (a [C] buffer on the hooked module).
    """
    contribution = grad_output[0] ** 2
    if contribution.dim() == 4:
        # Collapse spatial dims: [N, C, H, W] -> [N, C].
        contribution = contribution.sum(dim=3).sum(dim=2)
    self.importance += contribution.mean(dim=0)
class DownsampleStride(nn.Module):
    """Spatially subsample an NCHW tensor by keeping every second row/column."""

    def __init__(self, n=2):
        super(DownsampleStride, self).__init__()
        # NOTE(review): forward hard-codes stride 2; ``_n`` is unused.
        self._n = n

    def forward(self, x):
        return x[..., ::2, ::2]
class DownsampleConv(nn.Module):
    """Learned shortcut downsampling: 1x1 stride-2 convolution + batch norm."""

    def __init__(self, inplanes, planes):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(inplanes, planes, stride=2, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes),
        )

    def forward(self, x):
        return self.conv(x)
class ResidualBlock(nn.Module):
    """Basic two-conv residual block (conv-BN-ReLU-conv-BN + shortcut)."""

    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super(ResidualBlock, self).__init__()

        self.increase_dim = increase_dim
        if increase_dim:
            first_stride, planes = 2, inplanes * 2
        else:
            first_stride, planes = 1, inplanes

        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_a = nn.BatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = nn.BatchNorm2d(planes)

        if increase_dim:
            if downsampling == "stride":
                # Parameter-free shortcut: subsample, then zero-pad channels.
                self.downsampler = DownsampleStride()
                self._need_pad = True
            else:
                self.downsampler = DownsampleConv(inplanes, planes)
                self._need_pad = False

        self.last_relu = last_relu

    @staticmethod
    def pad(x):
        """Double the channel count by concatenating a zero tensor."""
        return torch.cat((x, x.mul(0)), 1)

    def forward(self, x):
        out = self.conv_a(x)
        out = F.relu(self.bn_a(out), inplace=True)
        out = self.bn_b(self.conv_b(out))

        shortcut = x
        if self.increase_dim:
            shortcut = self.downsampler(shortcut)
            if self._need_pad:
                shortcut = self.pad(shortcut)

        out = shortcut + out
        if self.last_relu:
            out = F.relu(out, inplace=True)
        return out
class PreActResidualBlock(nn.Module):
    """Pre-activation residual block (BN -> ReLU -> conv, twice, + shortcut).

    Fixes:
      * ``conv_a`` is now applied to the pre-activated tensor instead of the
        raw input; previously the first BN/ReLU result was computed and then
        discarded.
      * Accepts (and ignores) a ``downsampling`` keyword for drop-in
        compatibility with ``ResidualBlock`` in ``CifarResNet``.
    """
    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super().__init__()

        self.increase_dim = increase_dim
        if increase_dim:
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.bn_a = nn.BatchNorm2d(inplanes)
        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_b = nn.BatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        if increase_dim:
            # Parameter-free shortcut: stride-2 subsampling + zero channel padding.
            self.downsample = DownsampleStride()
            self.pad = lambda x: torch.cat((x, x.mul(0)), 1)

        self.last_relu = last_relu

    def forward(self, x):
        y = self.bn_a(x)
        y = F.relu(y, inplace=True)
        y = self.conv_a(y)  # BUGFIX: was self.conv_a(x), discarding the pre-activation.

        y = self.bn_b(y)
        y = F.relu(y, inplace=True)
        y = self.conv_b(y)

        if self.increase_dim:
            x = self.downsample(x)
            x = self.pad(x)

        y = x + y

        if self.last_relu:
            y = F.relu(y, inplace=True)
        return y
class Stage(nn.Module):
    """A sequence of blocks that also exposes every intermediate activation.

    ``block_relu`` inserts a ReLU between consecutive blocks; the recorded
    intermediate features are taken *before* that ReLU.
    """

    def __init__(self, blocks, block_relu=False):
        super().__init__()
        self.blocks = nn.ModuleList(blocks)
        self.block_relu = block_relu

    def forward(self, x):
        features = []
        for block in self.blocks:
            x = block(x)
            features.append(x)
            if self.block_relu:
                x = F.relu(x)
        return features, x
class Channel_Importance_Measure(nn.Module):
    """Identity-like layer scaling channels while accumulating importance.

    ``scale`` is a frozen all-ones parameter, so the layer is a no-op at
    initialization; ``importance`` is a buffer filled by a backward hook
    (see ``gradhook``) with per-channel squared-gradient statistics.
    """

    def __init__(self, num_channels):
        super().__init__()
        self.num_channels = num_channels
        self.scale = nn.Parameter(torch.randn(num_channels), requires_grad=False)
        nn.init.constant_(self.scale, 1.0)
        self.register_buffer('importance', torch.zeros_like(self.scale))

    def forward(self, x):
        # Broadcast the per-channel scale over [N, C, H, W] or [N, C] input.
        if x.dim() == 4:
            return x * self.scale.reshape([1, -1, 1, 1])
        return x * self.scale.reshape([1, -1])
class CifarResNet(nn.Module):
    """
    ResNet optimized for the Cifar Dataset, as specified in
    https://arxiv.org/abs/1512.03385.pdf

    Importance variant: each stage's output passes through a
    ``Channel_Importance_Measure`` layer whose backward hook (``gradhook``)
    accumulates per-channel squared-gradient importance; see
    ``start_cal_importance`` / ``stop_cal_importance`` below.
    """
    def __init__(
        self,
        n=5,
        nf=16,
        channels=3,
        preact=False,
        zero_residual=True,
        pooling_config={"type": "avg"},
        downsampling="stride",
        final_layer=False,
        all_attentions=False,
        last_relu=False,
        classifier_no_act=True,
        **kwargs
    ):
        """ Constructor
        Args:
            n: number of residual blocks per stage.
            nf: base number of convolutional filters.
            channels: number of input image channels.
            preact: use pre-activation residual blocks.
            zero_residual: zero-init the last BN weight of each residual branch.
            pooling_config: {"type": "avg"} or {"type": "weldon", ...}.
            downsampling: "stride" (parameter-free shortcut) or a learned conv.
            final_layer: optional head after pooling (False, True/"conv", or a
                dict with "type" and "reduction_factor").
            all_attentions: return every block activation, not one per stage.
            last_relu: apply a ReLU between consecutive blocks of a stage.
            classifier_no_act: if True, attach the final importance measure to
                the pre-ReLU features; otherwise to the post-ReLU features.
        """
        if kwargs:
            raise ValueError("Unused kwargs: {}.".format(kwargs))
        self.all_attentions = all_attentions
        logger.info("Downsampling type {}".format(downsampling))
        self._downsampling_type = downsampling
        self.last_relu = last_relu
        Block = ResidualBlock if not preact else PreActResidualBlock
        super(CifarResNet, self).__init__()
        self.conv_1_3x3 = nn.Conv2d(channels, nf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = nn.BatchNorm2d(nf)
        # Each stage is followed by an importance-measuring identity layer.
        self.stage_1 = self._make_layer(Block, nf, increase_dim=False, n=n)
        self.stage_1_importance = Channel_Importance_Measure(nf)
        self.stage_2 = self._make_layer(Block, nf, increase_dim=True, n=n - 1)
        self.stage_2_importance = Channel_Importance_Measure(2*nf)
        self.stage_3 = self._make_layer(Block, 2 * nf, increase_dim=True, n=n - 2)
        self.stage_3_importance = Channel_Importance_Measure(4*nf)
        self.stage_4 = Block(
            4 * nf, increase_dim=False, last_relu=False, downsampling=self._downsampling_type
        )
        self.stage_4_importance = Channel_Importance_Measure(4*nf)
        self.raw_features_importance = Channel_Importance_Measure(4*nf)
        # Backward-hook handles, populated by start_cal_importance().
        self._hook = None
        self.classifier_no_act = classifier_no_act
        if pooling_config["type"] == "avg":
            self.pool = nn.AdaptiveAvgPool2d((1, 1))
        elif pooling_config["type"] == "weldon":
            self.pool = pooling.WeldonPool2d(**pooling_config)
        else:
            raise ValueError("Unknown pooling type {}.".format(pooling_config["type"]))
        self.out_dim = 4 * nf
        if final_layer in (True, "conv"):
            self.final_layer = nn.Conv2d(self.out_dim, self.out_dim, kernel_size=1, bias=False)
        elif isinstance(final_layer, dict):
            if final_layer["type"] == "one_layer":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
                self.out_dim = int(self.out_dim * final_layer["reduction_factor"])
            elif final_layer["type"] == "two_layers":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, self.out_dim), nn.BatchNorm1d(self.out_dim),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
                self.out_dim = int(self.out_dim * final_layer["reduction_factor"])
            else:
                raise ValueError("Unknown final layer type {}.".format(final_layer["type"]))
        else:
            self.final_layer = None
        # Standard He initialization for convs/linears; unit BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
        if zero_residual:
            # Zero-init the second BN of each block so every residual branch
            # starts as a no-op (the block is ~identity at initialization).
            for m in self.modules():
                if isinstance(m, ResidualBlock):
                    nn.init.constant_(m.bn_b.weight, 0)
    def _make_layer(self, Block, planes, increase_dim=False, n=None):
        """Build one stage: optional dimension-increasing block + n blocks."""
        layers = []
        if increase_dim:
            layers.append(
                Block(
                    planes,
                    increase_dim=True,
                    last_relu=False,
                    downsampling=self._downsampling_type
                )
            )
            planes = 2 * planes
        for i in range(n):
            layers.append(Block(planes, last_relu=False, downsampling=self._downsampling_type))
        return Stage(layers, block_relu=self.last_relu)
    @property
    def last_conv(self):
        # Last convolution of the network (useful e.g. for feature hooks).
        return self.stage_4.conv_b
    def forward(self, x):
        """Return a dict with pooled features, attention maps, and the current
        per-stage importance buffers."""
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)
        feats_s1, x = self.stage_1(x)
        x = self.stage_1_importance(x)
        feats_s2, x = self.stage_2(x)
        x = self.stage_2_importance(x)
        feats_s3, x = self.stage_3(x)
        x = self.stage_3_importance(x)
        x = self.stage_4(x)
        x = self.stage_4_importance(x)
        raw_features = self.end_features(x)
        features = self.end_features(F.relu(x, inplace=False))
        # Attach the last importance measure to whichever feature tensor
        # feeds the classifier (pre- or post-activation).
        if self.classifier_no_act:
            raw_features = self.raw_features_importance(raw_features)
        else:
            features = self.raw_features_importance(features)
        if self.all_attentions:
            attentions = [*feats_s1, *feats_s2, *feats_s3, x]
        else:
            attentions = [feats_s1[-1], feats_s2[-1], feats_s3[-1], x]
        importance = [self.stage_1_importance.importance,
                      self.stage_2_importance.importance,
                      self.stage_3_importance.importance,
                      self.stage_4_importance.importance,
                      self.raw_features_importance.importance]
        return {"raw_features": raw_features, "features": features, "attention": attentions,
                "importance": importance}
    def start_cal_importance(self):
        """Register gradhook on every importance layer to accumulate stats.

        NOTE(review): ``register_backward_hook`` is deprecated in recent
        PyTorch in favor of ``register_full_backward_hook``; behavior may
        differ on newer versions — verify against the targeted PyTorch.
        """
        self._hook = [self.stage_1_importance.register_backward_hook(gradhook),
                      self.stage_2_importance.register_backward_hook(gradhook),
                      self.stage_3_importance.register_backward_hook(gradhook),
                      self.stage_4_importance.register_backward_hook(gradhook),
                      self.raw_features_importance.register_backward_hook(gradhook)]
    def reset_importance(self):
        """Zero all accumulated importance buffers in place."""
        self.stage_1_importance.importance.zero_()
        self.stage_2_importance.importance.zero_()
        self.stage_3_importance.importance.zero_()
        self.stage_4_importance.importance.zero_()
        self.raw_features_importance.importance.zero_()
    def normalize_importance(self):
        """Rescale each importance vector so its mean becomes 1."""
        total_importance = torch.mean(self.stage_1_importance.importance)
        self.stage_1_importance.importance = self.stage_1_importance.importance/total_importance
        total_importance = torch.mean(self.stage_2_importance.importance)
        self.stage_2_importance.importance = self.stage_2_importance.importance/total_importance
        total_importance = torch.mean(self.stage_3_importance.importance)
        self.stage_3_importance.importance = self.stage_3_importance.importance/total_importance
        total_importance = torch.mean(self.stage_4_importance.importance)
        self.stage_4_importance.importance = self.stage_4_importance.importance/total_importance
        total_importance = torch.mean(self.raw_features_importance.importance)
        self.raw_features_importance.importance = self.raw_features_importance.importance/total_importance
    def stop_cal_importance(self):
        """Remove the backward hooks installed by start_cal_importance()."""
        for hook in self._hook:
            hook.remove()
        self._hook = None
    def end_features(self, x):
        # Pool, flatten, and optionally project.
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        if self.final_layer is not None:
            x = self.final_layer(x)
        return x
def resnet_rebuffi(n=5, **kwargs):
    """Factory for the CIFAR ResNet parameterization used by Rebuffi et al. (iCaRL)."""
    model = CifarResNet(n=n, **kwargs)
    return model
| 13,049 | 31.625 | 106 | py |
AFC | AFC-master/inclearn/convnet/my_resnet_imagenet.py | """Pytorch port of the resnet used for CIFAR100 by iCaRL.
https://github.com/srebuffi/iCaRL/blob/master/iCaRL-TheanoLasagne/utils_cifar100.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from inclearn.lib import pooling
class DownsampleStride(nn.Module):
    """Spatially subsample an NCHW tensor by keeping every second row/column."""

    def __init__(self, n=2):
        super(DownsampleStride, self).__init__()
        # NOTE(review): forward hard-codes stride 2; ``_n`` is unused.
        self._n = n

    def forward(self, x):
        return x[..., ::2, ::2]
class DownsampleConv(nn.Module):
    """Learned shortcut downsampling: 1x1 stride-2 convolution + batch norm."""

    def __init__(self, inplanes, planes):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(inplanes, planes, stride=2, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes),
        )

    def forward(self, x):
        return self.conv(x)
class ResidualBlock(nn.Module):
    """Basic two-conv residual block; no final ReLU (the caller activates)."""

    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last=False, downsampling="stride"):
        super(ResidualBlock, self).__init__()

        self.increase_dim = increase_dim
        if increase_dim:
            first_stride, planes = 2, inplanes * 2
        else:
            first_stride, planes = 1, inplanes

        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_a = nn.BatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = nn.BatchNorm2d(planes)

        if increase_dim:
            if downsampling == "stride":
                # Parameter-free shortcut: subsample spatially, zero-pad channels.
                self.downsampler = DownsampleStride()
                self.downsample = lambda x: self.pad(self.downsampler(x))
            else:
                self.downsample = DownsampleConv(inplanes, planes)

        self.last = last

    @staticmethod
    def pad(x):
        """Double the channel count by concatenating a zero tensor."""
        return torch.cat((x, x.mul(0)), 1)

    def forward(self, x):
        out = self.conv_a(x)
        out = F.relu(self.bn_a(out), inplace=True)
        out = self.bn_b(self.conv_b(out))

        if self.increase_dim:
            x = self.downsample(x)

        return x + out
class PreActResidualBlock(nn.Module):
    """Pre-activation residual block (BN -> ReLU -> conv, twice, + shortcut).

    Fixes:
      * ``conv_a`` is now applied to the pre-activated tensor instead of the
        raw input; previously the first BN/ReLU result was computed and then
        discarded.
      * Accepts (and ignores) a ``downsampling`` keyword because
        ``CifarResNet`` passes that argument to its block class; without it,
        ``preact=True`` raised a TypeError.
    """
    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last=False, downsampling="stride"):
        super().__init__()

        self.increase_dim = increase_dim
        if increase_dim:
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.bn_a = nn.BatchNorm2d(inplanes)
        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_b = nn.BatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        if increase_dim:
            # Parameter-free shortcut: stride-2 subsampling + zero channel padding.
            self.downsample = DownsampleStride()
            self.pad = lambda x: torch.cat((x, x.mul(0)), 1)

        self.last = last

    def forward(self, x):
        y = self.bn_a(x)
        y = F.relu(y, inplace=True)
        y = self.conv_a(y)  # BUGFIX: was self.conv_a(x), discarding the pre-activation.

        y = self.bn_b(y)
        y = F.relu(y, inplace=True)
        y = self.conv_b(y)

        if self.increase_dim:
            x = self.downsample(x)
            x = self.pad(x)

        return x + y
class CifarResNet(nn.Module):
    """
    ResNet optimized for the Cifar Dataset, as specified in
    https://arxiv.org/abs/1512.03385.pdf

    Forward returns ``(raw_features, features)`` — pooled features before and
    after a final ReLU — plus the intermediate activation maps when
    ``attention_hook=True``.
    """
    def __init__(
        self,
        n=5,
        nf=16,
        channels=3,
        preact=False,
        zero_residual=True,
        pooling_config={"type": "avg"},
        downsampling="stride",
        final_layer=False,
        **kwargs
    ):
        """ Constructor
        Args:
            n: number of residual blocks per stage.
            nf: base number of convolutional filters.
            channels: number of input image channels.
            preact: use pre-activation residual blocks.
            zero_residual: zero-init the last BN weight of each residual branch.
            pooling_config: {"type": "avg"} or {"type": "weldon", ...}.
            downsampling: "stride" (parameter-free shortcut) or a learned conv.
            final_layer: optional head after pooling (False, True/"conv", or a
                dict with type "bn_relu_fc").
        """
        if kwargs:
            raise ValueError("Unused kwargs: {}.".format(kwargs))
        print("Downsampling type", downsampling)
        self._downsampling_type = downsampling
        Block = ResidualBlock if not preact else PreActResidualBlock
        super(CifarResNet, self).__init__()
        self.conv_1_3x3 = nn.Conv2d(channels, nf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = nn.BatchNorm2d(nf)
        self.stage_1 = self._make_layer(Block, nf, increase_dim=False, n=n)
        self.stage_2 = self._make_layer(Block, nf, increase_dim=True, n=n - 1)
        self.stage_3 = self._make_layer(Block, 2 * nf, increase_dim=True, n=n - 2)
        self.stage_4 = Block(
            4 * nf, increase_dim=False, last=True, downsampling=self._downsampling_type
        )
        if pooling_config["type"] == "avg":
            # NOTE(review): fixed 8x8 average pooling assumes an 8x8 final
            # feature map (i.e. 32x32 inputs) despite the "imagenet" filename
            # — confirm against the callers.
            self.pool = nn.AvgPool2d(8)
        elif pooling_config["type"] == "weldon":
            self.pool = pooling.WeldonPool2d(**pooling_config)
        else:
            raise ValueError("Unknown pooling type {}.".format(pooling_config["type"]))
        self.out_dim = 4 * nf
        if final_layer in (True, "conv"):
            self.final_layer = nn.Conv2d(self.out_dim, self.out_dim, kernel_size=1, bias=False)
        elif isinstance(final_layer, dict):
            if final_layer["type"] == "bn_relu_fc":
                self.final_layer = nn.Sequential(
                    nn.BatchNorm1d(self.out_dim), nn.ReLU(),
                    nn.Linear(self.out_dim, int(self.out_dim * final_layer["reduction_factor"]))
                )
            else:
                raise ValueError("Unknown final layer type {}.".format(final_layer["type"]))
        else:
            # A plain identity callable; note it is not an nn.Module, so it
            # does not appear in state_dict / modules().
            self.final_layer = lambda x: x
        # Standard He initialization for convs; unit BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_residual:
            # Zero-init the second BN of each block so every residual branch
            # starts as a no-op (the block is ~identity at initialization).
            for m in self.modules():
                if isinstance(m, ResidualBlock):
                    nn.init.constant_(m.bn_b.weight, 0)
    def _make_layer(self, Block, planes, increase_dim=False, last=False, n=None):
        # Build one stage. NOTE(review): the ``last`` parameter is accepted
        # but never used in this body.
        layers = []
        if increase_dim:
            layers.append(Block(planes, increase_dim=True, downsampling=self._downsampling_type))
            planes = 2 * planes
        for i in range(n):
            layers.append(Block(planes, downsampling=self._downsampling_type))
        return nn.Sequential(*layers)
    def forward(self, x, attention_hook=False):
        """Return (raw_features, features[, attention maps])."""
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)
        x_s1 = self.stage_1(x)
        x_s2 = self.stage_2(x_s1)
        x_s3 = self.stage_3(x_s2)
        x_s4 = self.stage_4(x_s3)
        raw_features = self.end_features(x_s4)
        features = self.end_features(F.relu(x_s4, inplace=False))
        if attention_hook:
            return raw_features, features, [x_s1, x_s2, x_s3, x_s4]
        return raw_features, features
    def end_features(self, x):
        # Pool, flatten, and apply the (possibly identity) final layer.
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.final_layer(x)
        return x
def resnet_rebuffi(n=5, **kwargs):
    """Factory for the CIFAR ResNet parameterization used by Rebuffi et al. (iCaRL)."""
    model = CifarResNet(n=n, **kwargs)
    return model
| 7,242 | 27.972 | 97 | py |
AFC | AFC-master/inclearn/convnet/tools/conv_mtl.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/pytorch/pytorch
## Tianjin University
## liuyaoyao@tju.edu.cn
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##
## Modified by: Arthur Douillard in order to apply to Incremental Learning
## Quoting Yaoyao Liu:
## For the first incremental phase, we train the network without SS weights.
## For the second incremental phase, we initialize SS weights with ones and
## zeros as MTL, we update SS weights and keep the network frozen. For the
## following incremental phases, at the beginning of each incremental phase,
## we apply the SS weights learned last phase to the frozen network and get
## a new network. Then we freeze the new network, reset SS weights and update
## SS weights during training;
##
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" MTL CONV layers. """
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import Conv2d
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
class Conv2dMtl(Conv2d):
    """Conv2d augmented with "scaling & shifting" (SS) meta-transfer weights.

    ``mtl_weight`` (per in/out-channel scaling of the frozen kernel) and
    ``mtl_bias`` can be trained while the underlying convolution is frozen;
    ``fuse_mtl_weights`` folds them back into the base weights.

    Fixes: every ``if self.bias`` raised ``RuntimeError: Boolean value of
    Tensor with more than one element is ambiguous`` whenever the layer had
    a bias with more than one output channel; all bias checks now compare
    against ``None`` explicitly.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        out_channels = self.weight.shape[0]
        in_channels = self.weight.shape[1]

        # SS parameters, initialized to the identity transform (scale 1, shift 0).
        self.mtl_weight = Parameter(torch.ones(out_channels, in_channels, 1, 1))
        self.mtl_bias = Parameter(torch.zeros(out_channels))

        self._apply_mtl = False
        self._apply_mtl_bias = False
        self._apply_bias_on_weights = False

        self.reset_mtl_parameters()

    def conv2d_forward(self, input, weight):
        """Convolve ``input``, optionally scaling/shifting with the SS params.

        NOTE(review): the ``weight`` argument is shadowed — ``self.weight``
        is always used, matching the original behavior.
        """
        if self.apply_mtl:
            # Per-channel scaling of the (frozen) kernel.
            weight = self.weight.mul(self.mtl_weight.expand(self.weight.shape))
            if self.apply_bias_on_weights:
                weight = weight.add(self.mtl_bias[..., None, None, None].expand(self.weight.shape))
        else:
            weight = self.weight

        # BUGFIX: ``self.bias`` is a tensor (or None); testing its truth
        # value raised RuntimeError — compare against None instead.
        if self.bias is not None and self.apply_mtl and not self.apply_bias_on_weights:
            bias = self.bias + self.mtl_bias
        elif self.apply_mtl_bias and not self.apply_bias_on_weights:
            bias = self.mtl_bias
        else:
            bias = self.bias

        return F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)

    def reset_mtl_parameters(self):
        """Reset the SS parameters to the identity transform."""
        self.mtl_weight.data.uniform_(1, 1)
        if self.bias is not None:
            self.mtl_bias.data.uniform_(0, 0)

    def reset_parameters(self):
        """Uniform fan-in initialization of the base conv weights/bias."""
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    @property
    def apply_mtl(self):
        return self._apply_mtl

    @apply_mtl.setter
    def apply_mtl(self, b):
        assert isinstance(b, bool), b
        self._apply_mtl = b

    @property
    def apply_mtl_bias(self):
        return self._apply_mtl_bias

    @apply_mtl_bias.setter
    def apply_mtl_bias(self, b):
        assert isinstance(b, bool), b
        self._apply_mtl_bias = b

    @property
    def apply_bias_on_weights(self):
        return self._apply_bias_on_weights

    @apply_bias_on_weights.setter
    def apply_bias_on_weights(self, b):
        assert isinstance(b, bool), b
        self._apply_bias_on_weights = b

    def freeze_convnet(self, freeze):
        """Freeze (or unfreeze) the base convolution weight and bias."""
        self.weight.requires_grad = not freeze
        # BUGFIX: tensor truthiness -> explicit None check.
        if self.bias is not None:
            self.bias.requires_grad = not freeze

    def fuse_mtl_weights(self):
        """Fold the learned SS parameters into the base weights in place."""
        with torch.no_grad():
            new_mtl_weight = self.mtl_weight.expand(self.weight.shape)
            self.weight.mul_(new_mtl_weight)

            if self.apply_bias_on_weights:
                self.weight.add_(self.mtl_bias[..., None, None, None].expand(self.weight.shape))
            elif self.bias is not None:
                # BUGFIX: tensor truthiness -> explicit None check.
                self.bias.add_(self.mtl_bias)
# (dataset metadata row: 4,162 chars, avg line 33.122951, max line 99, py)
# ==== repo: AFC -- file: AFC-master/inclearn/models/base.py ====
import abc
import logging
import os
import torch
LOGGER = logging.Logger("IncLearn", level="INFO")
logger = logging.getLogger(__name__)
class IncrementalLearner(abc.ABC):
    """Skeleton of an incremental learner.

    For each new task the driver calls, in order:

    1. ``set_task_info``
    2. ``before_task``
    3. ``train_task``
    4. ``after_task`` (and/or ``after_task_intensive``)
    5. ``eval_task``

    The public ``*_task`` methods switch the network between train/eval mode
    and delegate to the underscore-prefixed hooks that subclasses override.
    """

    def __init__(self, *args, **kwargs):
        self._network = None

    def set_task_info(self, task_info):
        """Store the metadata describing the incoming task."""
        (
            self._task,
            self._total_n_classes,
            self._task_size,
            self._n_train_data,
            self._n_test_data,
            self._n_tasks,
        ) = (
            task_info["task"],
            task_info["total_n_classes"],
            task_info["increment"],
            task_info["n_train_data"],
            task_info["n_test_data"],
            task_info["max_task"],
        )

    def before_task(self, train_loader, val_loader):
        """Run the pre-task hook with the network in eval mode."""
        LOGGER.info("Before task")
        self.eval()
        self._before_task(train_loader, val_loader)

    def train_task(self, train_loader, val_loader):
        """Run the training hook with the network in train mode."""
        LOGGER.info("train task")
        self.train()
        self._train_task(train_loader, val_loader)

    def after_task_intensive(self, inc_dataset):
        """Run the heavy post-task hook (e.g. exemplar building) in eval mode."""
        LOGGER.info("after task")
        self.eval()
        self._after_task_intensive(inc_dataset)

    def after_task(self, inc_dataset):
        """Run the light post-task hook in eval mode."""
        LOGGER.info("after task")
        self.eval()
        self._after_task(inc_dataset)

    def eval_task(self, data_loader):
        """Evaluate the current network on ``data_loader``."""
        LOGGER.info("eval task")
        self.eval()
        return self._eval_task(data_loader)

    def get_memory(self):
        """Rehearsal memory; ``None`` unless a subclass maintains one."""
        return None

    def get_val_memory(self):
        """Validation rehearsal memory; ``None`` unless a subclass maintains one."""
        return None

    # Hooks for subclasses. Only ``_train_task`` and ``_eval_task`` are mandatory.
    def _before_task(self, data_loader, val_loader):
        pass

    def _train_task(self, train_loader, val_loader):
        raise NotImplementedError

    def _after_task_intensive(self, data_loader):
        pass

    def _after_task(self, data_loader):
        pass

    def _eval_task(self, data_loader):
        raise NotImplementedError

    def save_metadata(self, directory, run_id):
        """Persist learner metadata; no-op by default."""
        pass

    def load_metadata(self, directory, run_id):
        """Restore learner metadata; no-op by default."""
        pass

    @property
    def _new_task_index(self):
        # Index of the first class introduced by the current task.
        return self._task * self._task_size

    @property
    def inc_dataset(self):
        return self.__inc_dataset

    @inc_dataset.setter
    def inc_dataset(self, inc_dataset):
        self.__inc_dataset = inc_dataset

    @property
    def network(self):
        return self._network

    def save_parameters(self, directory, run_id):
        """Save the network weights for the current task under ``directory``."""
        path = os.path.join(directory, f"net_{run_id}_task_{self._task}.pth")
        logger.info(f"Saving model at {path}.")
        torch.save(self.network.state_dict(), path)

    def load_parameters(self, directory, run_id):
        """Load the network weights for the current task, if a checkpoint exists."""
        path = os.path.join(directory, f"net_{run_id}_task_{self._task}.pth")
        if not os.path.exists(path):
            return

        logger.info(f"Loading model at {path}.")
        try:
            self.network.load_state_dict(torch.load(path))
        except Exception:
            # Older checkpoints stored the whole network object, not a state dict.
            logger.warning("Old method to save weights, it's deprecated!")
            self._network = torch.load(path)

    def eval(self):
        self._network.eval()

    def train(self):
        self._network.train()
# (dataset metadata row: 3,228 chars, avg line 24.832, max line 77, py)
# ==== repo: AFC -- file: AFC-master/inclearn/models/afc.py ====
import copy
import logging
import math
import numpy as np
import torch
from torch.nn import functional as F
from inclearn.lib import data, factory, losses, network, utils
from inclearn.lib.data import samplers
from inclearn.models.icarl import ICarl
logger = logging.getLogger(__name__)
class AFC(ICarl):
    """Adaptive Feature Consolidation incremental learner.

    Extends :class:`ICarl` with an importance-weighted feature-distillation
    loss: after training each task, per-feature importance is estimated from
    accumulated loss gradients (``_update_importance``) and used to weight the
    POD-style distillation between the old (frozen) and the new network.

    :param args: Parsed configuration dictionary.
    """

    def __init__(self, args):
        # NOTE: intentionally does not call ICarl.__init__; every attribute
        # is (re)defined here directly from the ``args`` configuration.
        self._disable_progressbar = args.get("no_progressbar", False)

        self._device = args["device"][0]
        self._multiple_devices = args["device"]

        # Optimization:
        self._batch_size = args["batch_size"]
        self._opt_name = args["optimizer"]
        self._lr = args["lr"]
        self._weight_decay = args["weight_decay"]
        self._n_epochs = args["epochs"]
        self._scheduling = args["scheduling"]
        self._lr_decay = args["lr_decay"]

        # Rehearsal Learning:
        self._memory_size = args["memory_size"]
        self._fixed_memory = args["fixed_memory"]
        self._herding_selection = args.get("herding_selection", {"type": "icarl"})
        self._n_classes = 0
        self._last_results = None
        self._validation_percent = args.get("validation")

        # Loss configurations:
        self._feature_distil = args.get("feature_distil", {})
        self._nca_config = args.get("nca", {})
        self._softmax_ce = args.get("softmax_ce", False)
        self._pod_flat_config = args.get("pod_flat", {})
        self._pod_spatial_config = args.get("pod_spatial", {})
        self._perceptual_features = args.get("perceptual_features")
        self._perceptual_style = args.get("perceptual_style")

        self._groupwise_factors = args.get("groupwise_factors", {})
        self._groupwise_factors_bis = args.get("groupwise_factors_bis", {})

        self._class_weights_config = args.get("class_weights_config", {})

        self._evaluation_type = args.get("eval_type", "icarl")
        self._evaluation_config = args.get("evaluation_config", {})

        self._eval_every_x_epochs = args.get("eval_every_x_epochs")
        self._early_stopping = args.get("early_stopping", {})

        self._gradcam_distil = args.get("gradcam_distil", {})

        classifier_kwargs = args.get("classifier_config", {})
        self._network = network.BasicNet(
            args["convnet"],
            convnet_kwargs=args.get("convnet_config", {}),
            classifier_kwargs=classifier_kwargs,
            postprocessor_kwargs=args.get("postprocessor_config", {}),
            device=self._device,
            return_features=True,
            extract_no_act=True,
            classifier_no_act=args.get("classifier_no_act", True),
            attention_hook=True,
            gradcam_hook=bool(self._gradcam_distil)
        )

        self._examplars = {}
        self._means = None

        self._old_model = None

        self._finetuning_config = args.get("finetuning_config")

        self._herding_indexes = []

        self._weight_generation = args.get("weight_generation")

        self._meta_transfer = args.get("meta_transfer", {})
        if self._meta_transfer:
            assert "mtl" in args["convnet"]

        self._post_processing_type = None
        self._data_memory, self._targets_memory = None, None

        self._args = args
        self._args["_logs"] = {}

    @property
    def _memory_per_class(self):
        """Returns the number of examplars per class."""
        if self._fixed_memory:
            return self._memory_size // self._total_n_classes
        return self._memory_size // self._n_classes

    def _train_task(self, train_loader, val_loader):
        """Train on the current task, optionally fine-tune, then refresh importance."""
        if self._meta_transfer:
            logger.info("Setting task meta-transfer")
            self.set_meta_transfer()

        # Clip gradients element-wise to [-5, 5] on every trainable parameter.
        for p in self._network.parameters():
            if p.requires_grad:
                p.register_hook(lambda grad: torch.clamp(grad, -5., 5.))

        logger.debug("nb {}.".format(len(train_loader.dataset)))

        if self._meta_transfer.get("clip"):
            logger.info(f"Clipping MTL weights ({self._meta_transfer.get('clip')}).")
            clipper = BoundClipper(*self._meta_transfer.get("clip"))
        else:
            clipper = None
        self._training_step(
            train_loader, val_loader, 0, self._n_epochs, record_bn=True, clipper=clipper
        )

        self._post_processing_type = None

        if self._finetuning_config and self._task != 0:
            logger.info("Fine-tuning")
            if self._finetuning_config["scaling"]:
                logger.info(
                    "Custom fine-tuning scaling of {}.".format(self._finetuning_config["scaling"])
                )
                self._post_processing_type = self._finetuning_config["scaling"]

            # NOTE(review): ``loader`` stays unbound if "sampling" is neither
            # "undersampling" nor "oversampling" -- confirm configs always set one.
            if self._finetuning_config["sampling"] == "undersampling":
                self._data_memory, self._targets_memory, _, _ = self.build_examplars(
                    self.inc_dataset, self._herding_indexes
                )
                loader = self.inc_dataset.get_memory_loader(*self.get_memory())
            elif self._finetuning_config["sampling"] == "oversampling":
                _, loader = self.inc_dataset.get_custom_loader(
                    list(range(self._n_classes - self._task_size, self._n_classes)),
                    memory=self.get_memory(),
                    mode="train",
                    sampler=samplers.MemoryOverSampler
                )

            if self._finetuning_config["tuning"] == "all":
                parameters = self._network.parameters()
            elif self._finetuning_config["tuning"] == "convnet":
                parameters = self._network.convnet.parameters()
            elif self._finetuning_config["tuning"] == "classifier":
                parameters = self._network.classifier.parameters()
            elif self._finetuning_config["tuning"] == "classifier_scale":
                parameters = [
                    {
                        "params": self._network.classifier.parameters(),
                        "lr": self._finetuning_config["lr"]
                    }, {
                        "params": self._network.post_processor.parameters(),
                        "lr": self._finetuning_config["lr"]
                    }
                ]
            else:
                raise NotImplementedError(
                    "Unknwown finetuning parameters {}.".format(self._finetuning_config["tuning"])
                )

            self._optimizer = factory.get_optimizer(
                parameters, self._opt_name, self._finetuning_config["lr"], self.weight_decay
            )
            self._scheduler = None
            self._training_step(
                loader,
                val_loader,
                self._n_epochs,
                self._n_epochs + self._finetuning_config["epochs"],
                record_bn=False
            )

        self._update_importance(train_loader)

    def _update_importance(self, train_loader):
        """Estimate per-feature importance by accumulating loss gradients over the loader."""
        if len(self._multiple_devices) > 1:
            logger.info("Duplicating model on {} gpus.".format(len(self._multiple_devices)))
            # Bugfix: this module never imports ``nn``, so the old
            # ``nn.DataParallel`` raised NameError on multi-GPU runs.
            training_network = torch.nn.DataParallel(self._network, self._multiple_devices)
        else:
            training_network = self._network

        training_network.convnet.reset_importance()
        training_network.convnet.start_cal_importance()
        for input_dict in train_loader:
            inputs, targets = input_dict["inputs"], input_dict["targets"]
            memory_flags = input_dict["memory_flags"]
            inputs, targets = inputs.to(self._device), targets.to(self._device)

            outputs = training_network(inputs)
            logits = outputs["logits"]
            if self._post_processing_type is None:
                scaled_logits = self._network.post_process(logits)
            else:
                scaled_logits = logits * self._post_processing_type

            # NOTE(review): ``loss`` is unbound if neither nca nor softmax_ce
            # is configured -- confirm one of them is always enabled.
            if self._nca_config:
                nca_config = copy.deepcopy(self._nca_config)
                if self._network.post_processor:
                    nca_config["scale"] = self._network.post_processor.factor

                loss = losses.nca(
                    logits,
                    targets,
                    memory_flags=memory_flags,
                    **nca_config
                )
            elif self._softmax_ce:
                # Classification loss is cosine + learned factor + softmax:
                loss = F.cross_entropy(scaled_logits, targets)

            # Gradients accumulate on purpose (no zero_grad): the convnet
            # hooks aggregate them into the importance statistics.
            loss.backward()
        training_network.convnet.stop_cal_importance()
        training_network.convnet.normalize_importance()

    @property
    def weight_decay(self):
        """Weight decay, either constant or linearly scheduled across tasks."""
        if isinstance(self._weight_decay, float):
            return self._weight_decay
        elif isinstance(self._weight_decay, dict):
            start, end = self._weight_decay["start"], self._weight_decay["end"]
            step = (max(start, end) - min(start, end)) / (self._n_tasks - 1)
            factor = -1 if start > end else 1
            return start + factor * self._task * step
        raise TypeError(
            "Invalid type {} for weight decay: {}.".format(
                type(self._weight_decay), self._weight_decay
            )
        )

    def _after_task(self, inc_dataset):
        """Freeze a copy of the network as the "old" model for distillation."""
        if self._gradcam_distil:
            # Grad-CAM hooks cannot be deep-copied; detach them before copying.
            self._network.zero_grad()
            self._network.unset_gradcam_hook()
            self._old_model = self._network.copy().eval().to(self._device)
            self._network.on_task_end()

            self._network.set_gradcam_hook()
            self._old_model.set_gradcam_hook()
        else:
            super()._after_task(inc_dataset)

    def _eval_task(self, test_loader):
        """Evaluate either with the iCaRL/NME protocol or raw softmax predictions."""
        if self._evaluation_type in ("icarl", "nme"):
            return super()._eval_task(test_loader)
        elif self._evaluation_type in ("softmax", "cnn"):
            ypred = []
            ytrue = []

            for input_dict in test_loader:
                ytrue.append(input_dict["targets"].numpy())

                inputs = input_dict["inputs"].to(self._device)
                logits = self._network(inputs)["logits"].detach()

                preds = F.softmax(logits, dim=-1)
                ypred.append(preds.cpu().numpy())

            ypred = np.concatenate(ypred)
            ytrue = np.concatenate(ytrue)

            self._last_results = (ypred, ytrue)

            return ypred, ytrue
        else:
            raise ValueError(self._evaluation_type)

    def _gen_weights(self):
        """Initialise classifier weights for the new classes (e.g. imprinted)."""
        if self._weight_generation:
            utils.add_new_weights(
                self._network, self._weight_generation if self._task != 0 else "basic",
                self._n_classes, self._task_size, self.inc_dataset
            )

    def _before_task(self, train_loader, val_loader):
        """Extend the classifier, then build the optimizer/scheduler for this task."""
        self._gen_weights()
        self._n_classes += self._task_size
        logger.info("Now {} examplars per class.".format(self._memory_per_class))

        if self._groupwise_factors and isinstance(self._groupwise_factors, dict):
            if self._groupwise_factors_bis and self._task > 0:
                logger.info("Using second set of groupwise lr.")
                groupwise_factor = self._groupwise_factors_bis
            else:
                groupwise_factor = self._groupwise_factors

            params = []
            for group_name, group_params in self._network.get_group_parameters().items():
                if group_params is None or group_name == "last_block":
                    continue
                factor = groupwise_factor.get(group_name, 1.0)
                if factor == 0.:
                    continue
                params.append({"params": group_params, "lr": self._lr * factor})
                print(f"Group: {group_name}, lr: {self._lr * factor}.")
        elif self._groupwise_factors == "ucir":
            params = [
                {
                    "params": self._network.convnet.parameters(),
                    "lr": self._lr
                },
                {
                    "params": self._network.classifier.new_weights,
                    "lr": self._lr
                },
            ]
        else:
            params = self._network.parameters()

        self._optimizer = factory.get_optimizer(params, self._opt_name, self._lr, self.weight_decay)

        self._scheduler = factory.get_lr_scheduler(
            self._scheduling,
            self._optimizer,
            nb_epochs=self._n_epochs,
            lr_decay=self._lr_decay,
            task=self._task
        )

        if self._class_weights_config:
            self._class_weights = torch.tensor(
                data.get_class_weights(train_loader.dataset, **self._class_weights_config)
            ).to(self._device)
        else:
            self._class_weights = None

    def _compute_loss(self, inputs, outputs, targets, onehot_targets, memory_flags):
        """Classification loss plus importance-weighted feature distillation."""
        features, logits, atts = outputs["raw_features"], outputs["logits"], outputs["attention"]

        if self._post_processing_type is None:
            scaled_logits = self._network.post_process(logits)
        else:
            scaled_logits = logits * self._post_processing_type

        if self._old_model is not None:
            with torch.no_grad():
                old_outputs = self._old_model(inputs)
                old_features = old_outputs["raw_features"]
                old_atts = old_outputs["attention"]
                old_importance = old_outputs["importance"]

        if self._nca_config:
            nca_config = copy.deepcopy(self._nca_config)
            if self._network.post_processor:
                nca_config["scale"] = self._network.post_processor.factor

            loss = losses.nca(
                logits,
                targets,
                memory_flags=memory_flags,
                class_weights=self._class_weights,
                **nca_config
            )
            self._metrics["nca"] += loss.item()
        elif self._softmax_ce:
            loss = F.cross_entropy(scaled_logits, targets)
            self._metrics["cce"] += loss.item()

        # --------------------
        # Distillation losses:
        # --------------------
        if self._old_model is not None:
            if self._feature_distil:
                if self._feature_distil.get("scheduled_factor", False):
                    # Scale distillation by sqrt(total / new) so it grows as
                    # the ratio of old knowledge to new classes increases.
                    factor = self._feature_distil["scheduled_factor"] * math.sqrt(
                        self._n_classes / self._task_size
                    )
                else:
                    factor = self._feature_distil.get("factor", 1.)

                # Last importance entry belongs to the final embedding; POD
                # only consumes the intermediate attention maps.
                feature_distil_factor = old_importance[:-1]
                feature_distil_loss = factor * losses.pod(
                    old_atts,
                    atts,
                    memory_flags=memory_flags.bool(),
                    feature_distil_factor=feature_distil_factor,
                    **self._feature_distil,
                )
                loss += feature_distil_loss
                self._metrics["f_distil"] += feature_distil_loss.item()

        return loss
class BoundClipper:
    """Callable clamping a module's MTL parameters into [lower_bound, upper_bound].

    Intended for use with ``module.apply`` (or per-batch application) so that
    the meta-transfer scale/shift parameters stay within a fixed range.
    """

    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def __call__(self, module):
        # Only modules carrying MTL parameters are touched; everything else
        # passes through unchanged.
        for attr_name in ("mtl_weight", "mtl_bias"):
            if hasattr(module, attr_name):
                getattr(module, attr_name).data.clamp_(
                    min=self.lower_bound, max=self.upper_bound
                )
# (dataset metadata row: 15,486 chars, avg line 36.773171, max line 100, py)
# ==== repo: AFC -- file: AFC-master/inclearn/models/icarl.py ====
import collections
import copy
import logging
import os
import pickle
import numpy as np
import torch
from scipy.spatial.distance import cdist
from torch import nn
from torch.nn import functional as F
from tqdm import tqdm
from inclearn.lib import factory, herding, losses, network, schedulers, utils
from inclearn.lib.network import hook
from inclearn.models.base import IncrementalLearner
EPSILON = 1e-8
logger = logging.getLogger(__name__)
class ICarl(IncrementalLearner):
    """Implementation of iCarl.

    # References:
    - iCaRL: Incremental Classifier and Representation Learning
      Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, Christoph H. Lampert
      https://arxiv.org/abs/1611.07725

    :param args: An argparse parsed arguments object.
    """

    def __init__(self, args):
        super().__init__()

        self._disable_progressbar = args.get("no_progressbar", False)

        self._device = args["device"][0]
        self._multiple_devices = args["device"]

        # Optimization hyper-parameters:
        self._opt_name = args["optimizer"]
        self._lr = args["lr"]
        self._weight_decay = args["weight_decay"]
        self._n_epochs = args["epochs"]

        self._scheduling = args["scheduling"]
        self._lr_decay = args["lr_decay"]

        self._warmup_config = args.get("warmup", {})
        if self._warmup_config and self._warmup_config["total_epoch"] > 0:
            # The warmup scheduler multiplies the lr back up; start lower so
            # the effective initial lr is unchanged.
            self._lr /= self._warmup_config["multiplier"]

        self._eval_every_x_epochs = args.get("eval_every_x_epochs")
        self._early_stopping = args.get("early_stopping", {})

        # Rehearsal memory configuration:
        self._memory_size = args["memory_size"]
        self._fixed_memory = args["fixed_memory"]
        self._herding_selection = args.get("herding_selection", {"type": "icarl"})
        self._n_classes = 0
        self._last_results = None
        self._validation_percent = args["validation"]

        self._rotations_config = args.get("rotations_config", {})
        self._random_noise_config = args.get("random_noise_config", {})

        self._network = network.BasicNet(
            args["convnet"],
            convnet_kwargs=args.get("convnet_config", {}),
            classifier_kwargs=args.get("classifier_config", {
                "type": "fc",
                "use_bias": True
            }),
            device=self._device,
            extract_no_act=True,
            classifier_no_act=False,
            rotations_predictor=bool(self._rotations_config)
        )

        self._examplars = {}
        self._means = None
        self._herding_indexes = []
        self._data_memory, self._targets_memory = None, None

        # Frozen copy of the network from the previous task, used as the
        # distillation teacher.
        self._old_model = None

        self._clf_loss = F.binary_cross_entropy_with_logits
        self._distil_loss = F.binary_cross_entropy_with_logits

        self._epoch_metrics = collections.defaultdict(list)

        self._meta_transfer = args.get("meta_transfer", {})

    def set_meta_transfer(self):
        """Configure the convnet's meta-transfer (MTL) state for the current task.

        Task 0 trains the plain convnet; task 1 enables the MTL scale/shift
        parameters (optionally freezing the base weights); later tasks fuse
        and reset the MTL parameters either every task ("repeat") or only
        once at task 2 ("once").
        """
        if self._meta_transfer["type"] not in ("repeat", "once", "none"):
            raise ValueError(f"Invalid value for meta-transfer {self._meta_transfer}.")

        if self._task == 0:
            self._network.convnet.apply_mtl(False)
        elif self._task == 1:
            if self._meta_transfer["type"] != "none":
                self._network.convnet.apply_mtl(True)

            if self._meta_transfer.get("mtl_bias"):
                self._network.convnet.apply_mtl_bias(True)
            elif self._meta_transfer.get("bias_on_weight"):
                self._network.convnet.apply_bias_on_weights(True)

            if self._meta_transfer["freeze_convnet"]:
                self._network.convnet.freeze_convnet(
                    True,
                    bn_weights=self._meta_transfer.get("freeze_bn_weights"),
                    bn_stats=self._meta_transfer.get("freeze_bn_stats")
                )
        elif self._meta_transfer["type"] != "none":
            if self._meta_transfer["type"] == "repeat" or (
                self._task == 2 and self._meta_transfer["type"] == "once"
            ):
                # Fold the learned scale/shift into the base weights, then
                # restart the MTL parameters from identity.
                self._network.convnet.fuse_mtl_weights()
                self._network.convnet.reset_mtl_parameters()

                if self._meta_transfer["freeze_convnet"]:
                    self._network.convnet.freeze_convnet(
                        True,
                        bn_weights=self._meta_transfer.get("freeze_bn_weights"),
                        bn_stats=self._meta_transfer.get("freeze_bn_stats")
                    )

    def save_metadata(self, directory, run_id):
        """Pickle memory data, herding indexes, and class means for resuming."""
        path = os.path.join(directory, f"meta_{run_id}_task_{self._task}.pkl")

        logger.info("Saving metadata at {}.".format(path))
        with open(path, "wb+") as f:
            pickle.dump(
                [self._data_memory, self._targets_memory, self._herding_indexes, self._class_means],
                f
            )

    def load_metadata(self, directory, run_id):
        """Restore memory data, herding indexes, and class means if present."""
        path = os.path.join(directory, f"meta_{run_id}_task_{self._task}.pkl")
        if not os.path.exists(path):
            return

        logger.info("Loading metadata at {}.".format(path))
        with open(path, "rb") as f:
            self._data_memory, self._targets_memory, self._herding_indexes, self._class_means = pickle.load(
                f
            )

    @property
    def epoch_metrics(self):
        return dict(self._epoch_metrics)

    # ----------
    # Public API
    # ----------

    def _before_task(self, train_loader, val_loader):
        # Grow the classifier and (re)build optimizer + lr scheduler.
        self._n_classes += self._task_size
        self._network.add_classes(self._task_size)
        logger.info("Now {} examplars per class.".format(self._memory_per_class))

        self._optimizer = factory.get_optimizer(
            self._network.parameters(), self._opt_name, self._lr, self._weight_decay
        )

        base_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            self._optimizer, self._scheduling, gamma=self._lr_decay
        )

        if self._warmup_config:
            if self._warmup_config.get("only_first_step", True) and self._task != 0:
                # NOTE(review): when warmup is skipped on later tasks,
                # self._scheduler keeps its previous value -- confirm intended.
                pass
            else:
                logger.info("Using WarmUp")
                self._scheduler = schedulers.GradualWarmupScheduler(
                    optimizer=self._optimizer,
                    after_scheduler=base_scheduler,
                    **self._warmup_config
                )
        else:
            self._scheduler = base_scheduler

    def _train_task(self, train_loader, val_loader):
        logger.debug("nb {}.".format(len(train_loader.dataset)))

        self._training_step(train_loader, val_loader, 0, self._n_epochs)

    def _training_step(
        self, train_loader, val_loader, initial_epoch, nb_epochs, record_bn=True, clipper=None
    ):
        """Run the main epoch loop between ``initial_epoch`` and ``nb_epochs``.

        Optionally records BN statistics on the last epoch (MCBN), applies a
        parameter clipper after each optimizer step, and periodically
        evaluates on ``val_loader`` with early stopping.
        """
        best_epoch, best_acc = -1, -1.
        wait = 0

        grad, act = None, None
        if len(self._multiple_devices) > 1:
            logger.info("Duplicating model on {} gpus.".format(len(self._multiple_devices)))
            training_network = nn.DataParallel(self._network, self._multiple_devices)
            if self._network.gradcam_hook:
                grad, act, back_hook, for_hook = hook.get_gradcam_hook(training_network)
                # NOTE(review): register_backward_hook is deprecated in recent
                # torch in favour of register_full_backward_hook -- confirm
                # the pinned torch version.
                training_network.module.convnet.last_conv.register_backward_hook(back_hook)
                training_network.module.convnet.last_conv.register_forward_hook(for_hook)
        else:
            training_network = self._network

        for epoch in range(initial_epoch, nb_epochs):
            self._metrics = collections.defaultdict(float)

            self._epoch_percent = epoch / (nb_epochs - initial_epoch)

            if epoch == nb_epochs - 1 and record_bn and len(self._multiple_devices) == 1 and \
               hasattr(training_network.convnet, "record_mode"):
                logger.info("Recording BN means & vars for MCBN...")
                training_network.convnet.clear_records()
                training_network.convnet.record_mode()

            prog_bar = tqdm(
                train_loader,
                disable=self._disable_progressbar,
                ascii=True,
                bar_format="{desc}: {percentage:3.0f}% | {n_fmt}/{total_fmt} | {rate_fmt}{postfix}"
            )
            for i, input_dict in enumerate(prog_bar, start=1):
                inputs, targets = input_dict["inputs"], input_dict["targets"]
                memory_flags = input_dict["memory_flags"]

                if grad is not None:
                    # Clear Grad-CAM buffers from the previous batch.
                    _clean_list(grad)
                    _clean_list(act)

                self._optimizer.zero_grad()
                loss = self._forward_loss(
                    training_network,
                    inputs,
                    targets,
                    memory_flags,
                    gradcam_grad=grad,
                    gradcam_act=act
                )
                loss.backward()
                self._optimizer.step()

                if clipper:
                    training_network.apply(clipper)

                self._print_metrics(prog_bar, epoch, nb_epochs, i)

            if self._scheduler:
                self._scheduler.step(epoch)

            if self._eval_every_x_epochs and epoch != 0 and epoch % self._eval_every_x_epochs == 0:
                self._network.eval()
                self._data_memory, self._targets_memory, self._herding_indexes, self._class_means = self.build_examplars(
                    self.inc_dataset, self._herding_indexes
                )
                # NOTE(review): _eval_task returns (ypreds, ytrue); the names
                # here look swapped, and ypred is a score matrix rather than
                # label predictions -- confirm the accuracy computation.
                ytrue, ypred = self._eval_task(val_loader)
                acc = 100 * round((ypred == ytrue).sum() / len(ytrue), 3)
                logger.info("Val accuracy: {}".format(acc))
                self._network.train()

                if acc > best_acc:
                    best_epoch = epoch
                    best_acc = acc
                    wait = 0
                else:
                    wait += 1

                if self._early_stopping and self._early_stopping["patience"] > wait:
                    logger.warning("Early stopping!")
                    break

        if self._eval_every_x_epochs:
            logger.info("Best accuracy reached at epoch {} with {}%.".format(best_epoch, best_acc))

        if len(self._multiple_devices) == 1 and hasattr(training_network.convnet, "record_mode"):
            training_network.convnet.normal_mode()

    def _print_metrics(self, prog_bar, epoch, nb_epochs, nb_batches):
        # Show running per-batch averages of every accumulated metric.
        pretty_metrics = ", ".join(
            "{}: {}".format(metric_name, round(metric_value / nb_batches, 3))
            for metric_name, metric_value in self._metrics.items()
        )

        prog_bar.set_description(
            "T{}/{}, E{}/{} => {}".format(
                self._task + 1, self._n_tasks, epoch + 1, nb_epochs, pretty_metrics
            )
        )

    def _forward_loss(
        self,
        training_network,
        inputs,
        targets,
        memory_flags,
        gradcam_grad=None,
        gradcam_act=None,
        **kwargs
    ):
        """Forward one batch and return the (checked) training loss."""
        inputs, targets = inputs.to(self._device), targets.to(self._device)
        onehot_targets = utils.to_onehot(targets, self._n_classes).to(self._device)

        outputs = training_network(inputs)
        if gradcam_act is not None:
            outputs["gradcam_gradients"] = gradcam_grad
            outputs["gradcam_activations"] = gradcam_act

        loss = self._compute_loss(inputs, outputs, targets, onehot_targets, memory_flags)

        if not utils.check_loss(loss):
            raise ValueError("A loss is NaN: {}".format(self._metrics))

        self._metrics["loss"] += loss.item()

        return loss

    def _after_task_intensive(self, inc_dataset):
        # Heavy post-task work: (re)build the rehearsal memory.
        if self._herding_selection["type"] == "confusion":
            self._compute_confusion_matrix()

        self._data_memory, self._targets_memory, self._herding_indexes, self._class_means = self.build_examplars(
            inc_dataset, self._herding_indexes
        )

    def _after_task(self, inc_dataset):
        # Snapshot the trained network as the frozen distillation teacher.
        self._old_model = self._network.copy().freeze().to(self._device)
        self._network.on_task_end()
        # self.plot_tsne()

    def _compute_confusion_matrix(self):
        # Evaluate on new classes (+ memory) to feed confusion-based herding.
        use_validation = self._validation_percent > 0.
        _, loader = self.inc_dataset.get_custom_loader(
            list(range(self._n_classes - self._task_size, self._n_classes)),
            memory=self.get_val_memory() if use_validation else self.get_memory(),
            mode="test",
            data_source="val" if use_validation else "train"
        )
        ypreds, ytrue = self._eval_task(loader)
        self._last_results = (ypreds, ytrue)

    def plot_tsne(self):
        """Dump a t-SNE plot of the memory embeddings, if a result folder is set."""
        if self.folder_result:
            loader = self.inc_dataset.get_custom_loader([], memory=self.get_memory())[1]
            embeddings, targets = utils.extract_features(self._network, loader)
            utils.plot_tsne(
                os.path.join(self.folder_result, "tsne_{}".format(self._task)), embeddings, targets
            )

    def _eval_task(self, data_loader):
        # Returns (score_matrix, true_labels) -- nearest-mean-of-exemplars scores.
        ypreds, ytrue = self.compute_accuracy(self._network, data_loader, self._class_means)

        return ypreds, ytrue

    # -----------
    # Private API
    # -----------

    def _compute_loss(self, inputs, outputs, targets, onehot_targets, memory_flags):
        """iCaRL loss: BCE classification + BCE distillation on old logits."""
        logits = outputs["logits"]

        if self._old_model is None:
            loss = F.binary_cross_entropy_with_logits(logits, onehot_targets)
        else:
            with torch.no_grad():
                old_targets = torch.sigmoid(self._old_model(inputs)["logits"])

            # Old-class targets come from the teacher; new-class targets stay one-hot.
            new_targets = onehot_targets.clone()
            new_targets[..., :-self._task_size] = old_targets

            loss = F.binary_cross_entropy_with_logits(logits, new_targets)

        if self._rotations_config:
            rotations_loss = losses.unsupervised_rotations(
                inputs, memory_flags, self._network, self._rotations_config
            )
            loss += rotations_loss
            self._metrics["rot"] += rotations_loss.item()

        return loss

    @property
    def _memory_per_class(self):
        """Returns the number of examplars per class."""
        if self._fixed_memory:
            return self._memory_size // self._total_n_classes
        return self._memory_size // self._n_classes

    # -----------------
    # Memory management
    # -----------------

    def build_examplars(
        self, inc_dataset, herding_indexes, memory_per_class=None, data_source="train"
    ):
        """Select exemplars for every class and compute per-class feature means.

        Returns ``(data_memory, targets_memory, herding_indexes, class_means)``.
        New classes get fresh selections via the configured herding strategy;
        old classes are truncated to the (possibly shrunken) per-class budget.
        """
        logger.info("Building & updating memory.")
        memory_per_class = memory_per_class or self._memory_per_class
        herding_indexes = copy.deepcopy(herding_indexes)

        data_memory, targets_memory = [], []
        class_means = np.zeros((self._n_classes, self._network.features_dim))

        for class_idx in range(self._n_classes):
            # We extract the features, both normal and flipped:
            inputs, loader = inc_dataset.get_custom_loader(
                class_idx, mode="test", data_source=data_source
            )
            features, targets = utils.extract_features(self._network, loader)
            features_flipped, _ = utils.extract_features(
                self._network,
                inc_dataset.get_custom_loader(class_idx, mode="flip", data_source=data_source)[1]
            )

            if class_idx >= self._n_classes - self._task_size:
                # New class, selecting the examplars:
                if self._herding_selection["type"] == "icarl":
                    selected_indexes = herding.icarl_selection(features, memory_per_class)
                elif self._herding_selection["type"] == "closest":
                    selected_indexes = herding.closest_to_mean(features, memory_per_class)
                elif self._herding_selection["type"] == "random":
                    selected_indexes = herding.random(features, memory_per_class)
                elif self._herding_selection["type"] == "first":
                    selected_indexes = np.arange(memory_per_class)
                elif self._herding_selection["type"] == "kmeans":
                    selected_indexes = herding.kmeans(
                        features, memory_per_class, k=self._herding_selection["k"]
                    )
                elif self._herding_selection["type"] == "confusion":
                    selected_indexes = herding.confusion(
                        *self._last_results,
                        memory_per_class,
                        class_id=class_idx,
                        minimize_confusion=self._herding_selection["minimize_confusion"]
                    )
                elif self._herding_selection["type"] == "var_ratio":
                    selected_indexes = herding.var_ratio(
                        memory_per_class, self._network, loader, **self._herding_selection
                    )
                elif self._herding_selection["type"] == "mcbn":
                    selected_indexes = herding.mcbn(
                        memory_per_class, self._network, loader, **self._herding_selection
                    )
                else:
                    raise ValueError(
                        "Unknown herding selection {}.".format(self._herding_selection)
                    )

                herding_indexes.append(selected_indexes)

            # Reducing examplars:
            # NOTE(review): bare except dropping into pdb is debug leftover --
            # consider removing or narrowing to IndexError.
            try:
                selected_indexes = herding_indexes[class_idx][:memory_per_class]
                herding_indexes[class_idx] = selected_indexes
            except:
                import pdb
                pdb.set_trace()

            # Re-computing the examplar mean (which may have changed due to the training):
            examplar_mean = self.compute_examplar_mean(
                features, features_flipped, selected_indexes, memory_per_class
            )

            data_memory.append(inputs[selected_indexes])
            targets_memory.append(targets[selected_indexes])

            class_means[class_idx, :] = examplar_mean

        data_memory = np.concatenate(data_memory)
        targets_memory = np.concatenate(targets_memory)

        return data_memory, targets_memory, herding_indexes, class_means

    def get_memory(self):
        return self._data_memory, self._targets_memory

    @staticmethod
    def compute_examplar_mean(feat_norm, feat_flip, indexes, nb_max):
        """Mean of the selected L2-normalised features (normal + flipped), re-normalised."""
        D = feat_norm.T
        D = D / (np.linalg.norm(D, axis=0) + EPSILON)

        D2 = feat_flip.T
        D2 = D2 / (np.linalg.norm(D2, axis=0) + EPSILON)

        selected_d = D[..., indexes]
        selected_d2 = D2[..., indexes]

        mean = (np.mean(selected_d, axis=1) + np.mean(selected_d2, axis=1)) / 2
        mean /= (np.linalg.norm(mean) + EPSILON)

        return mean

    @staticmethod
    def compute_accuracy(model, loader, class_means):
        """Nearest-mean-of-exemplars scores: returns (score_matrix, true_labels)."""
        features, targets_ = utils.extract_features(model, loader)

        features = (features.T / (np.linalg.norm(features.T, axis=0) + EPSILON)).T

        # Compute score for iCaRL: negated squared distance, so argmax == closest mean.
        sqd = cdist(class_means, features, 'sqeuclidean')
        score_icarl = (-sqd).T

        return score_icarl, targets_
def _clean_list(l):
for i in range(len(l)):
l[i] = None
# (dataset metadata row: 19,231 chars, avg line 37.007905, max line 121, py)
# ==== repo: AFC -- file: AFC-master/inclearn/lib/pooling.py ====
import torch
import torch.nn as nn
from torch.autograd import Function
class WeldonPool2d(nn.Module):
    """Weldon pooling: average of the top-kmax and bottom-kmin spatial scores.

    For each channel the h*w spatial scores are sorted; the output is the mean
    of the ``kmax`` highest scores, averaged with the mean of the ``kmin``
    lowest scores when ``kmin > 0``. ``kmax``/``kmin`` may be absolute counts
    (>= 1) or fractions of the number of regions (0 < k < 1).

    :param kmax: Number (or fraction) of top regions to average.
    :param kmin: Number (or fraction) of bottom regions; defaults to ``kmax``.
    """

    def __init__(self, kmax=1, kmin=None, **kwargs):
        super(WeldonPool2d, self).__init__()
        self.kmax = kmax
        self.kmin = kmin
        if self.kmin is None:
            self.kmin = self.kmax

        print("Using Weldon Pooling with kmax={}, kmin={}.".format(self.kmax, self.kmin))

        # The autograd Function is built per-instance so its staticmethods can
        # close over self.kmax / self.kmin.
        self._pool_func = self._define_function()

    def forward(self, input):
        return self._pool_func(input)

    def _define_function(self):

        class WeldonPool2dFunction(Function):

            @staticmethod
            def get_number_of_instances(k, n):
                # Interpret k as: 0 -> none, fraction of n, clamped count.
                if k <= 0:
                    return 0
                elif k < 1:
                    return round(k * n)
                elif k > n:
                    return int(n)
                else:
                    return int(k)

            @staticmethod
            def forward(ctx, input):
                # get batch information
                batch_size = input.size(0)
                num_channels = input.size(1)
                h = input.size(2)
                w = input.size(3)

                # get number of regions
                n = h * w

                # get the number of max and min instances
                kmax = WeldonPool2dFunction.get_number_of_instances(self.kmax, n)
                kmin = WeldonPool2dFunction.get_number_of_instances(self.kmin, n)

                # sort scores
                # NOTE(review): torch.sort with out=(new(), new().long()) is a
                # deprecated idiom -- confirm the pinned torch version supports it.
                sorted, indices = input.new(), input.new().long()
                torch.sort(input.view(batch_size, num_channels, n), dim=2, descending=True, out=(sorted, indices))

                # compute scores for max instances
                indices_max = indices.narrow(2, 0, kmax)
                output = sorted.narrow(2, 0, kmax).sum(2).div_(kmax)

                if kmin > 0:
                    # compute scores for min instances
                    indices_min = indices.narrow(2, n-kmin, kmin)
                    output.add_(sorted.narrow(2, n-kmin, kmin).sum(2).div_(kmin)).div_(2)

                # save input for backward
                # NOTE(review): indices_min is unbound when kmin == 0, which
                # would raise NameError here -- confirm kmin is always > 0.
                ctx.save_for_backward(indices_max, indices_min, input)

                # return output with right size
                return output.view(batch_size, num_channels)

            @staticmethod
            def backward(ctx, grad_output):
                # get the input
                indices_max, indices_min, input, = ctx.saved_tensors

                # get batch information
                batch_size = input.size(0)
                num_channels = input.size(1)
                h = input.size(2)
                w = input.size(3)

                # get number of regions
                n = h * w

                # get the number of max and min instances
                kmax = WeldonPool2dFunction.get_number_of_instances(self.kmax, n)
                kmin = WeldonPool2dFunction.get_number_of_instances(self.kmin, n)

                # compute gradient for max instances
                # Scatter grad_output / kmax back onto the top-kmax positions.
                grad_output_max = grad_output.view(batch_size, num_channels, 1).expand(batch_size, num_channels, kmax)
                grad_input = grad_output.new().resize_(batch_size, num_channels, n).fill_(0).scatter_(2, indices_max, grad_output_max).div_(kmax)

                if kmin > 0:
                    # compute gradient for min instances
                    grad_output_min = grad_output.view(batch_size, num_channels, 1).expand(batch_size, num_channels, kmin)
                    grad_input_min = grad_output.new().resize_(batch_size, num_channels, n).fill_(0).scatter_(2, indices_min, grad_output_min).div_(kmin)
                    grad_input.add_(grad_input_min).div_(2)

                return grad_input.view(batch_size, num_channels, h, w)

        return WeldonPool2dFunction.apply

    def __repr__(self):
        return self.__class__.__name__ + ' (kmax=' + str(self.kmax
                                                        ) + ', kmin=' + str(self.kmin) + ')'
| 4,029 | 37.75 | 153 | py |
AFC | AFC-master/inclearn/lib/callbacks.py | import copy
import torch
class Callback:
    """Minimal training-callback base class.

    Subclasses rely on `_iteration` (epoch counter) and `_in_training`
    (flag cleared to stop training), so those attribute names are part of
    the de-facto contract.
    """

    def __init__(self):
        self._iteration = 0
        self._in_training = True

    @property
    def in_training(self):
        """Whether the training loop should keep going."""
        return self._in_training

    def before_step(self):
        """Hook called right before each optimizer step. No-op by default."""
        pass

    def on_epoch_begin(self):
        """Hook called at the start of each epoch. No-op by default."""
        pass

    def on_epoch_end(self, metric=None):
        """Hook called at the end of each epoch; advances the epoch counter."""
        self._iteration = self._iteration + 1
class GaussianNoiseAnnealing(Callback):
    """Add annealed gaussian noise to the gradients before each optimizer step.

    The noise scale decays as eta / (1 + t) ** gamma with t the epoch counter.

    # References:
        - Adding Gradient Noise Improves Learning for Very Deep Networks
        - https://arxiv.org/abs/1511.06807

    :param parameters: Iterable of parameters whose `.grad` will be perturbed.
    :param eta: Initial noise scale.
    :param gamma: Decay rate.
    """

    def __init__(self, parameters, eta=0.3, gamma=0.55):
        self._parameters = parameters
        self._eta = eta
        self._gamma = gamma

        super().__init__()

    def before_step(self):
        # NOTE(review): the paper specifies a *variance*; this value multiplies
        # the noise directly, i.e. it acts as a standard deviation — confirm intent.
        scale = self._eta / ((1 + self._iteration) ** self._gamma)

        for param in self._parameters:
            # Perturb the gradient in place.
            param.grad.add_(torch.randn_like(param.grad) * scale)
class EarlyStopping(Callback):
    """Stop training when a monitored metric stops improving for `patience` epochs.

    A deepcopy snapshot of the network is kept at every improvement, so
    `self.network` always holds the best-so-far weights.
    """

    def __init__(self, network, minimize_metric=True, patience=5, epsilon=1e-3):
        self._patience = patience
        self._wait = 0

        # An improvement must beat the best value by at least `epsilon`.
        if minimize_metric:
            self._best = float("inf")
            self._cmp_fun = lambda old, new: new < (old - epsilon)
        else:
            self._best = float("-inf")
            self._cmp_fun = lambda old, new: new > (old + epsilon)

        self.network = network
        self._record = []

        super().__init__()

    def on_epoch_end(self, metric):
        """Track `metric`; snapshot on improvement, stop after `patience` stalls."""
        self._record.append(metric)

        if self._cmp_fun(self._best, metric):
            self._best = metric
            self._wait = 0
            # Snapshot the current (best) weights.
            self.network = copy.deepcopy(self.network)
        else:
            self._wait += 1

        if self._wait == self._patience:
            print("Early stopping, metric is: {}.".format(metric))
            print(self._record[-self._patience:])
            self._in_training = False

        super().on_epoch_end(metric=metric)
| 2,331 | 25.5 | 86 | py |
AFC | AFC-master/inclearn/lib/schedulers.py | import warnings
import numpy as np
from torch.optim.lr_scheduler import ReduceLROnPlateau, _LRScheduler
class GradualWarmupScheduler(_LRScheduler):
    """ Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
    From: https://github.com/ildoonet/pytorch-gradual-warmup-lr.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """

    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None, **kwargs):
        self.multiplier = multiplier
        if self.multiplier <= 1.:
            raise ValueError('multiplier should be greater than 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        # Set once the warmup phase has handed over to `after_scheduler`.
        self.finished = False
        super().__init__(optimizer)

    def get_lr(self):
        # After warmup: delegate to the chained scheduler (rescaling its base
        # lrs exactly once), or hold the target lr when there is none.
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    self.after_scheduler.base_lrs = [
                        base_lr * self.multiplier for base_lr in self.base_lrs
                    ]
                    self.finished = True
                    print("End of WarmUp.")
                return self.after_scheduler.get_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        # During warmup: linear interpolation from base_lr to base_lr * multiplier.
        return [
            base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.)
            for base_lr in self.base_lrs
        ]

    def step_ReduceLROnPlateau(self, metrics, epoch=None):
        """Variant of step() for a chained ReduceLROnPlateau scheduler."""
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch if epoch != 0 else 1
        # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
        if self.last_epoch <= self.total_epoch:
            warmup_lr = [
                base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.)
                for base_lr in self.base_lrs
            ]
            for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
                param_group['lr'] = lr
        else:
            # NOTE(review): `epoch` was reassigned above when None, so this
            # first branch looks unreachable — confirm before relying on it.
            if epoch is None:
                self.after_scheduler.step(metrics, None)
            else:
                self.after_scheduler.step(metrics, epoch - self.total_epoch)

    def step(self, epoch=None, metrics=None):
        # ReduceLROnPlateau has a different step() signature, handle it apart.
        if type(self.after_scheduler) != ReduceLROnPlateau:
            if self.finished and self.after_scheduler:
                if epoch is None:
                    self.after_scheduler.step(None)
                else:
                    self.after_scheduler.step(epoch - self.total_epoch)
            else:
                return super(GradualWarmupScheduler, self).step(epoch)
        else:
            self.step_ReduceLROnPlateau(metrics, epoch)
class CosineWithRestarts(_LRScheduler):  # pylint: disable=protected-access
    """
    Cosine annealing with restarts.

    This is decribed in the paper https://arxiv.org/abs/1608.03983.
    Taken from: https://github.com/allenai/allennlp

    Parameters
    ----------
    optimizer : ``torch.optim.Optimizer``
    t_max : ``int``
        The maximum number of iterations within the first cycle.
    eta_min : ``float``, optional (default=0)
        The minimum learning rate.
    last_epoch : ``int``, optional (default=-1)
        The index of the last epoch. This is used when restarting.
    factor : ``float``, optional (default=1)
        The factor by which the cycle length (``T_max``) increases after each restart.
    """

    def __init__(
        self, optimizer, t_max: int, eta_min: float = 0., last_epoch: int = -1, factor: float = 1.
    ) -> None:
        assert t_max > 0
        assert eta_min >= 0
        if t_max == 1 and factor == 1:
            # BUGFIX: was `warnings.warning`, which does not exist and raised
            # AttributeError instead of emitting the intended warning.
            warnings.warn(
                "Cosine annealing scheduler will have no effect on the learning "
                "rate since T_max = 1 and factor = 1."
            )
        self.t_max = t_max
        self.eta_min = eta_min
        self.factor = factor
        self._last_restart: int = 0
        self._cycle_counter: int = 0
        self._cycle_factor: float = 1.
        self._updated_cycle_len: int = t_max
        self._initialized: bool = False
        super(CosineWithRestarts, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Get updated learning rate."""
        # HACK: We need to check if this is the first time ``self.get_lr()`` was called,
        # since ``torch.optim.lr_scheduler._LRScheduler`` will call ``self.get_lr()``
        # when first initialized, but the learning rate should remain unchanged
        # for the first epoch.
        if not self._initialized:
            self._initialized = True
            return self.base_lrs

        step = self.last_epoch + 1
        self._cycle_counter = step - self._last_restart

        # Standard cosine annealing within the current cycle.
        lrs = [
            self.eta_min + ((lr - self.eta_min) / 2) * (
                np.cos(
                    np.pi *
                    (self._cycle_counter % self._updated_cycle_len) / self._updated_cycle_len
                ) + 1
            ) for lr in self.base_lrs
        ]

        if self._cycle_counter % self._updated_cycle_len == 0:
            # Adjust the cycle length.
            self._cycle_factor *= self.factor
            self._cycle_counter = 0
            self._updated_cycle_len = int(self._cycle_factor * self.t_max)
            self._last_restart = step

        return lrs
| 5,695 | 36.228758 | 98 | py |
AFC | AFC-master/inclearn/lib/utils.py | import datetime
import logging
import os
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn import manifold
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
logger = logging.getLogger(__name__)
def to_onehot(targets, n_classes):
    """Encode integer `targets` of shape (N,) as a float one-hot matrix (N, n_classes)."""
    encoded = torch.zeros(len(targets), n_classes, device=targets.device)
    rows = torch.arange(len(targets), device=targets.device)
    encoded[rows, targets.long()] = 1.
    return encoded
def check_loss(loss):
    """Return True when `loss` is a non-NaN, non-negative scalar tensor."""
    is_nan = torch.isnan(loss).item()
    is_non_negative = bool((loss >= 0.).item())
    return (not is_nan) and is_non_negative
def compute_accuracy(ypred, ytrue, task_size=10):
    """Accuracy overall and per block of `task_size` consecutive classes.

    :param ypred: Predicted labels, shape (n,).
    :param ytrue: Ground-truth labels, shape (n,).
    :return: Dict mapping "total" and "lo-hi" labels to rounded accuracies.
    """
    all_acc = {"total": round((ypred == ytrue).sum() / len(ytrue), 3)}

    for low in range(0, np.max(ytrue), task_size):
        high = low + task_size
        in_block = np.where(np.logical_and(ytrue >= low, ytrue < high))[0]
        label = "{}-{}".format(str(low).rjust(2, "0"), str(high - 1).rjust(2, "0"))
        all_acc[label] = round((ypred[in_block] == ytrue[in_block]).sum() / len(in_block), 3)

    return all_acc
def get_date():
    """Return today's date formatted as 'YYYYMMDD'."""
    return "{:%Y%m%d}".format(datetime.datetime.now())
def extract_features(model, loader):
    """Extract `model.extract` features for every sample of `loader`.

    The model is put in eval mode for the extraction and restored to its
    previous train/eval mode before returning.

    :return: Tuple (features, targets) of concatenated numpy arrays.
    """
    all_features, all_targets = [], []

    previous_mode = model.training
    model.eval()
    for batch in loader:
        inputs, batch_targets = batch["inputs"], batch["targets"]

        batch_features = model.extract(inputs.to(model.device)).detach().cpu().numpy()

        all_features.append(batch_features)
        all_targets.append(batch_targets.numpy())
    model.train(previous_mode)

    return np.concatenate(all_features), np.concatenate(all_targets)
def compute_centroids(model, loader):
    """Mean feature vector per class over `loader`.

    :return: Tuple (centroids, class_ids) as numpy arrays.
    """
    features, targets = extract_features(model, loader)

    means, class_ids = [], []
    for class_id in np.unique(targets):
        mask = np.where(targets == class_id)[0]
        means.append(np.mean(features[mask], axis=0, keepdims=True))
        class_ids.append(class_id)

    return np.concatenate(means), np.array(class_ids)
def classify(model, loader):
    """Argmax predictions of `model` over `loader`.

    :return: Tuple (predictions, targets) of concatenated numpy arrays.
    """
    all_preds, all_targets = [], []

    for batch in loader:
        inputs, batch_targets = batch["inputs"], batch["targets"]

        outputs = model(inputs.to(model.device))
        if not isinstance(outputs, list):
            outputs = [outputs]

        # The last element is taken as the final logits.
        batch_preds = outputs[-1].argmax(dim=1).detach().cpu().numpy()

        all_preds.append(batch_preds)
        all_targets.append(batch_targets)

    return np.concatenate(all_preds), np.concatenate(all_targets)
def plot_tsne(path, embeddings, targets):
    """Project `embeddings` to 2D with t-SNE, scatter-plot colored by target, save to `path`."""
    assert embeddings.shape[0] == targets.shape[0]

    projected = manifold.TSNE(n_components=2).fit_transform(embeddings)

    plt.scatter(
        projected[..., 0],
        projected[..., 1],
        c=targets,
        vmin=min(targets),
        vmax=max(targets),
        s=10,
        cmap=mpl.cm.get_cmap('RdYlBu')
    )

    # Create the destination directory if needed before saving.
    directory = os.path.dirname(path)
    if not os.path.exists(directory):
        os.makedirs(directory)
    plt.savefig(path)
def add_new_weights(network, weight_generation, current_nb_classes, task_size, inc_dataset):
    """Grow the classifier of `network` with `task_size` new class weights.

    :param network: Model exposing add_imprinted_classes / add_custom_weights /
                    add_classes.
    :param weight_generation: Dict with a "type" key among
        {"imprinted", "embedding", "basic", "ghosts"}; a bare string is deprecated.
    :param current_nb_classes: Number of classes already known.
    :param task_size: Number of new classes to add.
    :param inc_dataset: Dataset used to extract features for imprinted/embedding init.
    """
    if isinstance(weight_generation, str):
        warnings.warn("Use a dict for weight_generation instead of str", DeprecationWarning)
        weight_generation = {"type": weight_generation}

    if weight_generation["type"] == "imprinted":
        logger.info("Generating imprinted weights")

        network.add_imprinted_classes(
            list(range(current_nb_classes, current_nb_classes + task_size)), inc_dataset,
            **weight_generation
        )
    elif weight_generation["type"] == "embedding":
        logger.info("Generating embedding weights")

        # Initialize each new weight from the class' mean L2-normalized feature;
        # with proxy_per_class > 1, sample extra proxies around that mean.
        mean_embeddings = []
        for class_index in range(current_nb_classes, current_nb_classes + task_size):
            _, loader = inc_dataset.get_custom_loader([class_index])
            features, _ = extract_features(network, loader)
            features = features / np.linalg.norm(features, axis=-1)[..., None]

            mean = np.mean(features, axis=0)
            if weight_generation.get("proxy_per_class", 1) == 1:
                mean_embeddings.append(mean)
            else:
                std = np.std(features, axis=0, ddof=1)
                mean_embeddings.extend(
                    [
                        np.random.normal(loc=mean, scale=std)
                        for _ in range(weight_generation.get("proxy_per_class", 1))
                    ]
                )

        network.add_custom_weights(np.stack(mean_embeddings))
    elif weight_generation["type"] == "basic":
        network.add_classes(task_size)
    elif weight_generation["type"] == "ghosts":
        # Initialize weights from pre-computed "ghost" features of unseen classes.
        features, targets = weight_generation["ghosts"]
        features = features.cpu().numpy()
        targets = targets.cpu().numpy()

        weights = []
        for class_id in range(current_nb_classes, current_nb_classes + task_size):
            indexes = np.where(targets == class_id)[0]
            class_features = features[indexes]
            if len(class_features) == 0:
                raise Exception(f"No ghost class_id={class_id} for weight generation!")

            weights.append(np.mean(class_features, axis=0))

        weights = torch.tensor(np.stack(weights)).float()
        network.add_custom_weights(weights, ponderate=weight_generation.get("ponderate"))
    else:
        raise ValueError("Unknown weight generation type {}.".format(weight_generation["type"]))
def apply_kmeans(features, targets, nb_clusters, pre_normalization):
    """Replace every class' samples by its `nb_clusters` K-Means centroids.

    :param pre_normalization: L2-normalize each class' features before clustering.
    :return: Tuple (centroids, targets) of concatenated numpy arrays.
    """
    logger.info(
        "Kmeans on {} samples (pre-normalized: {}) with {} clusters per class".format(
            len(features), pre_normalization, nb_clusters
        )
    )

    centroids, centroid_targets = [], []
    for class_index in np.unique(targets):
        class_features = features[np.where(targets == class_index)[0]]
        if pre_normalization:
            class_features = class_features / np.linalg.norm(class_features, axis=-1).reshape(-1, 1)

        kmeans = KMeans(n_clusters=nb_clusters)
        kmeans.fit(class_features)

        centroids.append(kmeans.cluster_centers_)
        centroid_targets.append(np.ones((nb_clusters,)) * class_index)

    return np.concatenate(centroids), np.concatenate(centroid_targets)
def apply_knn(
    features,
    targets,
    features_test,
    targets_test,
    nb_neighbors,
    normalize=True,
    weights="uniform"
):
    """Fit a KNN classifier on (features, targets) and predict on the test set.

    :param normalize: L2-normalize train and test features beforehand.
    :return: Tuple (predicted_targets, targets_test).
    """
    logger.info(
        "KNN with {} neighbors and pre-normalized features: {}, weights: {}.".format(
            nb_neighbors, normalize, weights
        )
    )

    def _unit_rows(x):
        # Project every row onto the unit sphere.
        return x / np.linalg.norm(x, axis=-1).reshape(-1, 1)

    if normalize:
        features = _unit_rows(features)
        features_test = _unit_rows(features_test)

    knn = KNeighborsClassifier(n_neighbors=nb_neighbors, n_jobs=10, weights=weights)
    knn.fit(features, targets)

    pred_targets = knn.predict(features_test)
    return pred_targets, targets_test
def select_class_samples(samples, targets, selected_class):
    """Return the (samples, targets) subset whose target equals `selected_class`."""
    mask = np.where(targets == selected_class)[0]
    return samples[mask], targets[mask]
def matrix_infinity_norm(matrix):
    """Infinity norm of a (w, h) matrix: the largest absolute row sum."""
    row_sums = torch.abs(matrix).sum(dim=1)  # shape (w,)
    return row_sums.max()
| 7,556 | 30.098765 | 100 | py |
AFC | AFC-master/inclearn/lib/calibration.py | import torch
from torch import optim
from torch.nn import functional as F
from inclearn.lib.network import (CalibrationWrapper, LinearModel, TemperatureScaling)
def calibrate(network, loader, device, indexes, calibration_type="linear"):
    """Corrects the bias for new classes.

    :param network: The logits extractor model, usually convnet+FC w/o final act.
    :param loader: The validation data loader.
    :param device: Device on which apply the computation.
    :param indexes: A list of tuple made a starting and ending indexes. They delimit
                    on which range of targets to apply the calibration. If given
                    several tuples, different models will be used per range.
    :param calibration_type: Either "linear" or "temperature" scaling.
    :return: A wrapper `CalibrationWrapper`.
    """
    logits, labels = _extract_data(network, loader, device)

    calibration_wrapper = _get_calibration_model(indexes, calibration_type).to(device)

    # LBFGS closure: re-evaluated several times per optimizer step.
    # NOTE: the local name shadows the builtin `eval`.
    def eval():
        corrected_logits = calibration_wrapper(logits)
        loss = F.cross_entropy(corrected_logits, labels)
        loss.backward()
        return loss

    optimizer = optim.LBFGS(calibration_wrapper.parameters(), lr=0.01, max_iter=50)
    optimizer.step(eval)

    return calibration_wrapper
def _get_calibration_model(indexes, calibration_type):
    """Build a CalibrationWrapper holding one sub-model per (start, end) target range.

    :raises ValueError: For an unknown `calibration_type`.
    """
    wrapper = CalibrationWrapper()

    for start_index, end_index in indexes:
        if calibration_type == "linear":
            sub_model = LinearModel(alpha=1., beta=0.)
        elif calibration_type == "temperature":
            sub_model = TemperatureScaling(temperature=1.)
        else:
            raise ValueError("Unknown calibration model {}.".format(calibration_type))
        wrapper.add_model(sub_model, start_index, end_index)

    return wrapper
def _extract_data(network, loader, device):
logits = []
labels = []
with torch.no_grad():
for input_dict in loader:
logits.append(network(input_dict["inputs"].to(device))["logits"])
labels.append(input_dict["targets"].to(device))
logits = torch.cat(logits).to(device)
labels = torch.cat(labels).to(device)
return logits, labels
| 2,171 | 33.47619 | 86 | py |
AFC | AFC-master/inclearn/lib/vizualization.py | import torch
def grad_cam(spatial_features, selected_logits):
    """Gradients of per-sample selected logits w.r.t. spatial features (Grad-CAM style).

    :param spatial_features: Tensor requiring grad, first dim is the batch.
    :param selected_logits: Indexable collection of one scalar logit per sample.
    :return: Gradient tensor with the same shape as `spatial_features`.
    """
    batch_size = spatial_features.shape[0]
    assert batch_size == len(selected_logits)

    formated_logits = [selected_logits[i] for i in range(batch_size)]

    # BUGFIX: removed a leftover `pdb.set_trace()` breakpoint, and unpack the
    # tuple returned by torch.autograd.grad (a tuple has no `.shape`).
    grads = torch.autograd.grad(
        formated_logits, spatial_features, retain_graph=True, create_graph=True
    )[0]

    assert grads.shape == spatial_features.shape
    return grads
| 447 | 22.578947 | 79 | py |
AFC | AFC-master/inclearn/lib/factory.py | import warnings
import torch
from torch import optim
from inclearn import models
from inclearn.convnet import (
densenet, my_resnet, my_resnet_importance, my_resnet2, my_resnet_brn,
my_resnet_mcbn, my_resnet_mtl, resnet, resnet_importance,
resnet_mtl, ucir_resnet, vgg
)
from inclearn.lib import data, schedulers
def get_optimizer(params, optimizer, lr, weight_decay=0.0):
    """Build a torch optimizer by name.

    :param optimizer: One of "adam", "adamw", "sgd", "sgd_nesterov".
    :raises NotImplementedError: For any other name.
    """
    builders = {
        "adam": lambda: optim.Adam(params, lr=lr, weight_decay=weight_decay),
        "adamw": lambda: optim.AdamW(params, lr=lr, weight_decay=weight_decay),
        "sgd": lambda: optim.SGD(params, lr=lr, weight_decay=weight_decay, momentum=0.9),
        "sgd_nesterov": lambda: optim.SGD(
            params, lr=lr, weight_decay=weight_decay, momentum=0.9, nesterov=True
        ),
    }

    if optimizer not in builders:
        raise NotImplementedError
    return builders[optimizer]()
def get_convnet(convnet_type, **kwargs):
    """Instantiate a backbone network by name, forwarding `kwargs` to its factory.

    :raises NotImplementedError: For an unknown `convnet_type`.
    """
    factories = {
        "resnet18": resnet.resnet18,
        "resnet18_importance": resnet_importance.resnet18,
        "resnet101": resnet.resnet101,
        "resnet18_mtl": resnet_mtl.resnet18,
        "resnet34": resnet.resnet34,
        "resnet32": resnet.resnet32,
        "rebuffi": my_resnet.resnet_rebuffi,
        "rebuffi_importance": my_resnet_importance.resnet_rebuffi,
        "rebuffi_brn": my_resnet_brn.resnet_rebuffi,
        "myresnet18": my_resnet2.resnet18,
        "myresnet34": my_resnet2.resnet34,
        "densenet121": densenet.densenet121,
        "ucir": ucir_resnet.resnet32,
        "rebuffi_mcbn": my_resnet_mcbn.resnet_rebuffi,
        "rebuffi_mtl": my_resnet_mtl.resnet_rebuffi,
        "vgg19": vgg.vgg19_bn,
    }

    if convnet_type not in factories:
        raise NotImplementedError("Unknwon convnet type {}.".format(convnet_type))
    return factories[convnet_type](**kwargs)
def get_model(args):
    """Resolve args["model"] (case-insensitive) to a model class and instantiate it.

    :raises NotImplementedError: For an unknown model name.
    """
    dict_models = {
        "icarl": models.ICarl,
        "afc": models.AFC,
    }

    name = args["model"].lower()
    if name in dict_models:
        return dict_models[name](args)

    raise NotImplementedError(
        "Unknown model {}, must be among {}.".format(args["model"], list(dict_models.keys()))
    )
def get_data(args, class_order=None):
    """Build the IncrementalDataset described by the experiment config `args`.

    :param args: Experiment config dict (dataset name, batch size, increments...).
    :param class_order: Optional explicit class ordering; otherwise derived
                        from `random_classes`/`seed`.
    """
    return data.IncrementalDataset(
        dataset_name=args["dataset"],
        random_order=args["random_classes"],
        shuffle=True,
        batch_size=args["batch_size"],
        workers=args["workers"],
        validation_split=args["validation"],
        onehot=args["onehot"],
        increment=args["increment"],
        initial_increment=args["initial_increment"],
        sampler=get_sampler(args),
        sampler_config=args.get("sampler_config", {}),
        data_path=args["data_path"],
        class_order=class_order,
        seed=args["seed"],
        dataset_transforms=args.get("dataset_transforms", {}),
        all_test_classes=args.get("all_test_classes", False),
        metadata_path=args.get("metadata_path")
    )
def set_device(args):
    """Replace args["device"] (list of ints, -1 meaning CPU) with torch.device objects, in place."""
    args["device"] = [
        torch.device("cpu") if idx == -1 else torch.device("cuda:{}".format(idx))
        for idx in args["device"]
    ]
def get_sampler(args):
    """Return the sampler class matching args["sampler"], or None when unset.

    :raises ValueError: For an unknown sampler name.
    """
    if args["sampler"] is None:
        return None

    sampler_type = args["sampler"].lower().strip()
    if sampler_type == "npair":
        return data.NPairSampler
    if sampler_type == "triplet":
        return data.TripletSampler
    if sampler_type == "tripletsemihard":
        return data.TripletCKSampler

    raise ValueError("Unknown sampler {}.".format(sampler_type))
def get_lr_scheduler(
    scheduling_config, optimizer, nb_epochs, lr_decay=0.1, warmup_config=None, task=0
):
    """Build a learning-rate scheduler from a config dict, optionally warmup-wrapped.

    :param scheduling_config: Dict with a "type" key among {"step", "exponential",
        "plateau", "cosine", "cosine_with_restart", "cosine_annealing_with_restart"};
        a bare string or list is accepted but deprecated.
    :param optimizer: The optimizer to schedule.
    :param nb_epochs: Total number of epochs (used by cosine variants).
    :param lr_decay: Fallback gamma for the "step" type.
    :param warmup_config: Optional GradualWarmupScheduler kwargs.
    :param task: Current task index; warmup may be restricted to task 0.
    """
    if scheduling_config is None:
        return None
    elif isinstance(scheduling_config, str):
        warnings.warn("Use a dict not a string for scheduling config!", DeprecationWarning)
        scheduling_config = {"type": scheduling_config}
    elif isinstance(scheduling_config, list):
        warnings.warn("Use a dict not a list for scheduling config!", DeprecationWarning)
        scheduling_config = {"type": "step", "epochs": scheduling_config}

    if scheduling_config["type"] == "step":
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            scheduling_config["epochs"],
            gamma=scheduling_config.get("gamma") or lr_decay
        )
    elif scheduling_config["type"] == "exponential":
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, scheduling_config["gamma"])
    elif scheduling_config["type"] == "plateau":
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, factor=scheduling_config["gamma"]
        )
    elif scheduling_config["type"] == "cosine":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, nb_epochs)
    elif scheduling_config["type"] == "cosine_with_restart":
        scheduler = schedulers.CosineWithRestarts(
            optimizer,
            t_max=scheduling_config.get("cycle_len", nb_epochs),
            factor=scheduling_config.get("factor", 1.)
        )
    elif scheduling_config["type"] == "cosine_annealing_with_restart":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, T_0=1, T_mult=2, eta_min=scheduling_config.get("min_lr")
        )
    else:
        raise ValueError("Unknown LR scheduling type {}.".format(scheduling_config["type"]))

    if warmup_config:
        # By default warmup is only applied on the very first task.
        if warmup_config.get("only_first_step", True) and task != 0:
            pass
        else:
            print("Using WarmUp")
            scheduler = schedulers.GradualWarmupScheduler(
                optimizer=optimizer, after_scheduler=scheduler, **warmup_config
            )

    return scheduler
| 6,414 | 34.247253 | 97 | py |
AFC | AFC-master/inclearn/lib/metrics.py | import collections
import numpy as np
import torch
class MetricLogger:
    """Accumulates per-task evaluation metrics over an incremental-learning run."""

    def __init__(self, nb_tasks, nb_classes, increments):
        # One list of values per metric name, appended to after every task.
        self.metrics = collections.defaultdict(list)
        self.nb_tasks = nb_tasks
        self.nb_classes = nb_classes
        self.increments = increments

        # _accuracy_matrix[class, task] holds the per-class accuracy at that
        # task; -1 marks "not evaluated yet".
        self._accuracy_matrix = np.ones((nb_classes, nb_tasks), dtype="float16") * -1
        self._task_counter = 0

    def log_task(self, ypreds, ytrue, task_size, zeroshot=False):
        """Compute and store every metric for the task that just finished.

        :param ypreds: Prediction scores, shape (n, nb_seen_classes).
        :param ytrue: Ground-truth labels, shape (n,).
        :param task_size: Number of classes introduced by the latest task.
        :param zeroshot: Also log accuracies restricted to seen/unseen classes.
        """
        self.metrics["accuracy"].append(
            accuracy_per_task(ypreds, ytrue, task_size=10, topk=1)
        )  # FIXME various task size
        self.metrics["accuracy_top5"].append(
            accuracy_per_task(ypreds, ytrue, task_size=None, topk=5)
        )
        self.metrics["accuracy_per_class"].append(
            accuracy_per_task(ypreds, ytrue, task_size=1, topk=1)
        )
        self.metrics["incremental_accuracy"].append(incremental_accuracy(self.metrics["accuracy"]))
        self.metrics["incremental_accuracy_top5"].append(
            incremental_accuracy(self.metrics["accuracy_top5"])
        )
        self.metrics["forgetting"].append(forgetting(self.metrics["accuracy"]))

        self._update_accuracy_matrix(self.metrics["accuracy_per_class"][-1])
        self.metrics["cord"].append(cord_metric(self._accuracy_matrix))
        #self.metrics["cord_old"].append(cord_metric(self._accuracy_matrix, only="old"))
        #self.metrics["cord_new"].append(cord_metric(self._accuracy_matrix, only="new"))

        if zeroshot:
            # Split accuracy between classes already seen and classes not yet seen.
            seen_classes_indexes = np.where(ytrue < sum(self.increments[:self._task_counter + 1])
                                           )[0]
            self.metrics["seen_classes_accuracy"].append(
                accuracy(ypreds[seen_classes_indexes], ytrue[seen_classes_indexes])
            )
            unseen_classes_indexes = np.where(
                ytrue >= sum(self.increments[:self._task_counter + 1])
            )[0]
            self.metrics["unseen_classes_accuracy"].append(
                accuracy(ypreds[unseen_classes_indexes], ytrue[unseen_classes_indexes])
            )

        if self._task_counter > 0:
            # Old/new split only makes sense from the second task onward.
            self.metrics["old_accuracy"].append(old_accuracy(ypreds, ytrue, task_size))
            self.metrics["new_accuracy"].append(new_accuracy(ypreds, ytrue, task_size))

        self._task_counter += 1

    @property
    def last_results(self):
        # Snapshot of the most recent value of every logged metric.
        results = {
            "task_id": len(self.metrics["accuracy"]) - 1,
            "accuracy": self.metrics["accuracy"][-1],
            "incremental_accuracy": self.metrics["incremental_accuracy"][-1],
            "accuracy_top5": self.metrics["accuracy_top5"][-1],
            "incremental_accuracy_top5": self.metrics["incremental_accuracy_top5"][-1],
            "forgetting": self.metrics["forgetting"][-1],
            "accuracy_per_class": self.metrics["accuracy_per_class"][-1],
            "cord": self.metrics["cord"][-1]
        }

        if "old_accuracy" in self.metrics:
            results.update(
                {
                    "old_accuracy": self.metrics["old_accuracy"][-1],
                    "new_accuracy": self.metrics["new_accuracy"][-1],
                    "avg_old_accuracy": np.mean(self.metrics["old_accuracy"]),
                    "avg_new_accuracy": np.mean(self.metrics["new_accuracy"]),
                }
            )
        if "seen_classes_accuracy" in self.metrics:
            results.update(
                {
                    "seen_classes_accuracy": self.metrics["seen_classes_accuracy"][-1],
                    "unseen_classes_accuracy": self.metrics["unseen_classes_accuracy"][-1],
                }
            )

        return results

    def _update_accuracy_matrix(self, new_accuracy_per_class):
        # Per-class labels look like "03-03"; parse the class id from the label.
        for k, v in new_accuracy_per_class.items():
            if k == "total":
                continue

            class_id = int(k.split("-")[0])
            self._accuracy_matrix[class_id, self._task_counter] = v
def cord_metric(accuracy_matrix, only=None):
    """Mean over classes of each class' mean accuracy across the tasks where it
    was evaluated (entries equal to -1 mean "not evaluated").

    :param accuracy_matrix: Array of shape (nb_classes, nb_tasks).
    :param only: "old" drops each class' first evaluation, "new" keeps only it.
    """
    accuracies = []
    for class_id in range(accuracy_matrix.shape[0]):
        filled_indexes = np.where(accuracy_matrix[class_id] > -1.)[0]

        # BUGFIX: the slicing results were previously discarded, making the
        # "old"/"new" filters no-ops.
        if only == "old":
            filled_indexes = filled_indexes[1:]
        elif only == "new":
            filled_indexes = filled_indexes[:1]

        if len(filled_indexes) == 0:
            continue
        accuracies.append(np.mean(accuracy_matrix[class_id, filled_indexes]))

    return np.mean(accuracies).item()
def accuracy_per_task(ypreds, ytrue, task_size=10, topk=1):
    """Top-k accuracy overall and per group of `task_size` consecutive classes.

    :param ypreds: The predictions array.
    :param ytrue: The ground-truth array.
    :param task_size: Size of each class group; None disables the per-group split.
    :return: Dict mapping "total" and "lo-hi" labels to accuracies.
    """
    results = {"total": accuracy(ypreds, ytrue, topk=topk)}

    if task_size is not None:
        max_class = np.max(ytrue)
        for low in range(0, max_class + task_size, task_size):
            if low > max_class:
                break

            high = low + task_size
            in_group = np.where((ytrue >= low) & (ytrue < high))[0]
            label = "{}-{}".format(str(low).rjust(2, "0"), str(high - 1).rjust(2, "0"))
            results[label] = accuracy(ypreds[in_group], ytrue[in_group], topk=topk)

    return results
def old_accuracy(ypreds, ytrue, task_size):
    """Top-1 accuracy restricted to 'old' classes (all but the last `task_size`).

    :param ypreds: The predictions array, shape (n, nb_classes).
    :param ytrue: The ground-truth array.
    :param task_size: The size of the latest task.
    """
    nb_classes = ypreds.shape[1]
    old_indexes = np.where(ytrue < nb_classes - task_size)[0]
    return accuracy(ypreds[old_indexes], ytrue[old_indexes], topk=1)
def new_accuracy(ypreds, ytrue, task_size):
    """Top-1 accuracy restricted to the 'new' classes (the last `task_size` ones).

    :param ypreds: The predictions array, shape (n, nb_classes).
    :param ytrue: The ground-truth array.
    :param task_size: The size of the latest task.
    """
    nb_classes = ypreds.shape[1]
    new_indexes = np.where(ytrue >= nb_classes - task_size)[0]
    return accuracy(ypreds[new_indexes], ytrue[new_indexes], topk=1)
def accuracy(output, targets, topk=1):
    """Precision@k, rounded to 3 decimals; returns 0. on an empty batch."""
    output = torch.tensor(output)
    targets = torch.tensor(targets)

    batch_size = targets.shape[0]
    if batch_size == 0:
        return 0.

    # Never ask for more ranks than there are distinct classes.
    topk = min(topk, len(np.unique(targets)))

    _, pred = output.topk(topk, 1, True, True)
    pred = pred.t()
    hits = pred.eq(targets.view(1, -1).expand_as(pred))

    nb_correct = hits[:topk].reshape(-1).float().sum().item()
    return round(nb_correct / batch_size, 3)
def incremental_accuracy(accuracies):
    """Average of the 'total' accuracy over all tasks so far, as in iCaRL.

    :param accuracies: List of per-task dicts each holding a "total" entry.
    """
    totals = [task_acc["total"] for task_acc in accuracies]
    return sum(totals) / len(totals)
def forgetting(accuracies):
    """Average forgetting: best past accuracy minus final accuracy, per task label.

    :param accuracies: List of per-task accuracy dicts ("total" plus "lo-hi" labels).
    :return: 0. with a single task, otherwise the mean drop over task labels.
    """
    if len(accuracies) == 1:
        return 0.

    last_accuracies = accuracies[-1]
    # "total" is an aggregate, not a task; exclude it from the average.
    usable_tasks = [task for task in last_accuracies.keys() if task != "total"]

    total_forgetting = 0.
    for task in usable_tasks:
        max_task = 0.
        for task_accuracies in accuracies[:-1]:
            if task in task_accuracies:
                max_task = max(max_task, task_accuracies[task])

        total_forgetting += max_task - last_accuracies[task]

    # BUGFIX: previously divided by the key count *including* "total",
    # which deflated the average forgetting.
    return total_forgetting / max(len(usable_tasks), 1)
def forward_transfer(accuracies):
    """Measures the influence that learning a task has on the performance of future tasks.

    NOTE(review): unfinished stub — the loop body is `pass`, `fwt` is never
    updated, and the function implicitly returns None.

    References:
        * Don't forget, there is more than forgetting: new metrics for Continual Learning
          Diaz et al.
          NeurIPS Workshop 2018
    """
    nb_tasks = len(accuracies)

    fwt = 0
    for i in range(nb_tasks):
        pass
| 8,076 | 33.370213 | 99 | py |
AFC | AFC-master/inclearn/lib/distance.py | import torch
from torch.nn import functional as F
def squared_euclidian_distance(a, b):
    """Pairwise squared euclidean distances between rows of `a` and rows of `b`."""
    distances = torch.cdist(a, b)
    return distances * distances
def cosine_similarity(a, b):
    """Pairwise cosine similarity between rows of `a` and rows of `b`."""
    a_unit = F.normalize(a, p=2, dim=-1)
    b_unit = F.normalize(b, p=2, dim=-1)
    return torch.mm(a_unit, b_unit.t())
def stable_cosine_distance(a, b, squared=True):
    """Computes the pairwise distance matrix with numerical stability.

    :param a: Tensor of shape (n, d).
    :param b: Tensor of shape (m, d).
    :param squared: Return squared distances instead of taking the sqrt.
    :return: The (n, m) cross-block of pairwise distances between `a` and `b`.
    """
    # Work on the concatenation so one Gram matrix yields all pairs at once.
    mat = torch.cat([a, b])

    # ||x||^2 + ||y||^2 - 2 x.y for every pair (x, y).
    pairwise_distances_squared = torch.add(
        mat.pow(2).sum(dim=1, keepdim=True).expand(mat.size(0), -1),
        torch.t(mat).pow(2).sum(dim=0, keepdim=True).expand(mat.size(0), -1)
    ) - 2 * (torch.mm(mat, torch.t(mat)))

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = torch.clamp(pairwise_distances_squared, min=0.0)

    # Get the mask where the zero distances are at.
    error_mask = torch.le(pairwise_distances_squared, 0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        # The epsilon keeps sqrt's gradient finite where the distance is zero.
        pairwise_distances = torch.sqrt(pairwise_distances_squared + error_mask.float() * 1e-16)

    # Undo conditionally adding 1e-16.
    pairwise_distances = torch.mul(pairwise_distances, (error_mask == False).float())

    # Explicitly set diagonals to zero.
    mask_offdiagonals = 1 - torch.eye(*pairwise_distances.size(), device=pairwise_distances.device)
    pairwise_distances = torch.mul(pairwise_distances, mask_offdiagonals)

    return pairwise_distances[:a.shape[0], a.shape[0]:]
| 1,492 | 34.547619 | 99 | py |
AFC | AFC-master/inclearn/lib/herding.py | import numpy as np
import torch
from sklearn.cluster import KMeans
from torch.nn import functional as F
from inclearn.lib import utils
def closest_to_mean(features, nb_examplars):
    """Indexes of the `nb_examplars` rows closest (L2) to the normalized feature mean.

    NOTE(review): normalization is along axis=0 (per dimension, over samples) —
    confirm this is intended rather than per-sample (axis=1) normalization.
    """
    normalized = features / (np.linalg.norm(features, axis=0) + 1e-8)
    mean = np.mean(normalized, axis=0)
    return _l2_distance(normalized, mean).argsort()[:nb_examplars]
def icarl_selection(features, nb_examplars):
    """iCaRL herding: greedily pick samples whose running mean best matches the class mean.

    :param features: (n, d) features of a single class.
    :param nb_examplars: Number of sample indexes to return.
    :return: Array of selected indexes, ordered by selection rank.
    """
    # Work column-wise on L2-normalized features.
    D = features.T
    D = D / (np.linalg.norm(D, axis=0) + 1e-8)
    mu = np.mean(D, axis=1)
    # herding_matrix[i] = selection rank of sample i (0 means unselected).
    herding_matrix = np.zeros((features.shape[0],))

    w_t = mu
    iter_herding, iter_herding_eff = 0, 0

    # Each step adds the sample best aligned with the residual mean direction,
    # with a hard cap of 1000 iterations as a safety net.
    while not (
        np.sum(herding_matrix != 0) == min(nb_examplars, features.shape[0])
    ) and iter_herding_eff < 1000:
        tmp_t = np.dot(w_t, D)
        ind_max = np.argmax(tmp_t)
        iter_herding_eff += 1
        if herding_matrix[ind_max] == 0:
            herding_matrix[ind_max] = 1 + iter_herding
            iter_herding += 1

        w_t = w_t + mu - D[:, ind_max]

    # Unselected samples get a large sentinel rank so they sort last.
    herding_matrix[np.where(herding_matrix == 0)[0]] = 10000

    return herding_matrix.argsort()[:nb_examplars]
def random(features, nb_examplars):
    """Pick `nb_examplars` sample indexes uniformly at random, without replacement.

    NOTE: the name shadows the stdlib `random` module within this file.
    """
    shuffled = np.random.permutation(len(features))
    return shuffled[:nb_examplars]
def kmeans(features, nb_examplars, k=5):
    """Samples examplars for memory according to KMeans.

    :param features: The image features of a single class.
    :param nb_examplars: Number of images to keep.
    :param k: Number of clusters for KMeans algo, defaults to 5
    :return: A numpy array of indexes.
    """
    model = KMeans(n_clusters=k)
    cluster_assignements = model.fit_predict(features)

    # Keep an equal share of samples from each cluster.
    # NOTE(review): np.random.choice defaults to replace=True, so duplicate
    # indexes are possible, and the remainder of nb_examplars // k is dropped
    # — confirm both are intended.
    nb_per_clusters = nb_examplars // k
    indexes = []
    for c in range(k):
        c_indexes = np.random.choice(np.where(cluster_assignements == c)[0], size=nb_per_clusters)
        indexes.append(c_indexes)
    return np.concatenate(indexes)
def confusion(ypreds, ytrue, nb_examplars, class_id=None, minimize_confusion=True):
    """Samples examplars for memory according to the predictions.

    :param ypreds: All the predictions (shape [b, c]).
    :param ytrue: The true label.
    :param nb_examplars: Number of images to keep.
    :param minimize_confusion: Samples easiest examples or hardest.
    """
    # Restrict to the samples of the requested class.
    indexes = np.where(ytrue == class_id)[0]
    ypreds, ytrue = ypreds[indexes], ytrue[indexes]

    # NOTE(review): this picks, per row, the class id sitting at sorted position
    # `ytrue[i]`, not the rank of the true class (which would need a double
    # argsort) — confirm the intended semantics before reusing this.
    ranks = ypreds.argsort(axis=1)[:, ::-1][np.arange(len(ypreds)), ytrue]

    indexes = ranks.argsort()
    if minimize_confusion:
        return indexes[:nb_examplars]
    return indexes[-nb_examplars:]
def minimize_confusion(inc_dataset, network, memory, class_index, nb_examplars):
    """Select examplars of `class_index` by K-Means in the network's feature space.

    The class' samples (test-mode transforms) are clustered into 4 groups and
    each cluster contributes its `nb_examplars // 4` closest samples.

    BUGFIX: removed a large stretch of unreachable code that lived *after* the
    `return` (an inter-class-confusion variant using `memory`), along with the
    unused `new_mean` and a redundant local `KMeans` import.

    :param inc_dataset: Dataset providing get_custom_loader.
    :param network: Feature extractor network.
    :param memory: Unused; kept for interface compatibility.
    :param class_index: Class whose samples are being selected.
    :param nb_examplars: Total number of indexes to return.
    :return: Concatenated numpy array of selected sample indexes.
    """
    _, new_loader = inc_dataset.get_custom_loader(class_index, mode="test")
    new_features, _ = utils.extract_features(network, new_loader)

    n_clusters = 4
    model = KMeans(n_clusters=n_clusters)
    model.fit(new_features)

    indexes = []
    for i in range(n_clusters):
        cluster = model.cluster_centers_[i]
        distances = _l2_distance(cluster, new_features)
        indexes.append(distances.argsort()[:nb_examplars // n_clusters])

    return np.concatenate(indexes)
def var_ratio(memory_per_class, network, loader, select="max", type=None):
    """Select exemplar indexes according to the network's "var_ratio" output.

    :param memory_per_class: Number of indexes to keep.
    :param network: Model whose forward returns a dict holding a "var_ratio" entry.
    :param loader: Iterable of input dicts with an "inputs" tensor.
    :param select: "max" keeps the highest ratios, "min" the lowest.
    :return: A numpy array of indexes.
    """
    collected = []
    for batch in loader:
        batch_inputs = batch["inputs"].to(network.device)
        with torch.no_grad():
            collected.append(network(batch_inputs)["var_ratio"])
    sorted_indexes = np.concatenate(collected).argsort()
    if select == "max":
        return sorted_indexes[-memory_per_class:]
    if select == "min":
        return sorted_indexes[:memory_per_class]
    raise ValueError("Only possible value for <select> are [max, min], not {}.".format(select))
def mcbn(memory_per_class, network, loader, select="max", nb_samples=100, type=None):
    """Select exemplars by Monte-Carlo uncertainty under MCBN sampling mode.

    The network is switched to its stochastic "sampling" mode, `nb_samples`
    forward passes are made per batch, and samples are ranked by the variation
    ratio of their sampled predictions.

    :param memory_per_class: Number of indexes to keep.
    :param network: Model exposing `convnet.sampling_mode()` / `normal_mode()`.
    :param loader: Iterable of input dicts with an "inputs" tensor.
    :param select: "max" keeps the most uncertain samples, "min" the least.
    :param nb_samples: Number of stochastic forward passes per batch.
    :return: A numpy array of indexes.
    """
    if not hasattr(network.convnet, "sampling_mode"):
        raise ValueError("Network must be MCBN-compatible.")
    network.convnet.sampling_mode()
    all_probs = []
    for input_dict in loader:
        inputs = input_dict["inputs"].to(network.device)
        probs = []
        for _ in range(nb_samples):
            with torch.no_grad():
                outputs = network(inputs)
            logits = outputs["logits"]
            probs.append(F.softmax(logits, dim=-1).cpu().numpy())
        # Shape (nb_samples, batch, nb_classes).
        probs = np.stack(probs)
        all_probs.append(probs)
    network.convnet.normal_mode()
    # Concatenate over the batch axis -> (nb_samples, n_images, nb_classes).
    all_probs = np.concatenate(all_probs, axis=1)
    # _var_ratio expects (n_images, nb_samples, nb_classes).
    var_ratios = _var_ratio(all_probs.transpose(1, 0, 2))
    indexes = var_ratios.argsort()
    assert len(indexes) == all_probs.shape[1]
    if select == "max":
        return indexes[-memory_per_class:]
    elif select == "min":
        return indexes[:memory_per_class]
    raise ValueError("Only possible value for <select> are [max, min], not {}.".format(select))
# ---------
# Utilities
# ---------
def _var_ratio(sampled_probs):
predicted_class = sampled_probs.max(axis=2)
hist = np.array(
[
np.histogram(predicted_class[i, :], range=(0, 10))[0]
for i in range(predicted_class.shape[0])
]
)
return 1. - hist.max(axis=1) / sampled_probs.shape[1]
def _l2_distance(x, y):
return np.power(x - y, 2).sum(-1)
def _split_memory_per_class(targets):
max_class = max(targets)
for class_index in range(max_class):
yield np.where(targets == class_index)[0]
| 6,268 | 29.881773 | 98 | py |
AFC | AFC-master/inclearn/lib/loops/generators.py | import collections
import itertools
import logging
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from tqdm import tqdm
from .loops import _print_metrics
logger = logging.getLogger(__name__)
def perclass_loop(
    inc_dataset,
    class_ids,
    devices,
    n_epochs,
    optimizer,
    loss_function,
    task,
    n_tasks,
    target_to_word,
    network=None,
    batch_size=128,
    word_embeddings=None,
    scheduler=None,
    preprocessing=None,
    memory_class_ids=None,
    memory=None,
    disable_progressbar=False,
    features_key="raw_features",
    max_per_batch=3000
):
    """Train a feature generator class-per-class on extracted visual features.

    Features are first extracted for every class (memory data only for old
    classes), optionally preprocessed, then the generator is optimized for
    `n_epochs` passes where each progress-bar step handles one class.

    :return: Tuple (all features concatenated, all targets concatenated).
    """
    if len(devices) > 1:
        logger.info("Duplicating model on {} gpus.".format(len(devices)))
        training_network = nn.DataParallel(network, devices)
    else:
        training_network = network
    visual_features, visual_targets = _extract_features(
        class_ids,
        training_network,
        inc_dataset,
        devices[0],
        memory_class_ids=memory_class_ids,
        memory=memory,
        disable_progressbar=disable_progressbar,
        features_key=features_key
    )
    if preprocessing is not None:
        # Fit the preprocessing on all classes at once, then transform per class.
        all_features = torch.cat(list(visual_features.values()))
        logger.info(f"Features shape: {str(all_features.shape)}.")
        preprocessing.fit(all_features)
        del all_features
        for k in list(visual_features.keys()):
            visual_features[k] = preprocessing.transform(visual_features[k])
    # Actually train the generator
    if n_epochs > 0:
        logger.info("Training the generator...")
        for epoch in range(n_epochs):
            metrics = collections.defaultdict(float)
            prog_bar = tqdm(
                class_ids,
                ascii=True,
                bar_format="{desc}: {bar} | {percentage:3.0f}%",
                disable=disable_progressbar
            )
            for batch_index, class_id in enumerate(prog_bar, start=1):
                class_loss = 0.
                # NOTE(review): shape[1] is the feature DIMENSION, not the
                # number of samples — chunking by `max_per_batch` over samples
                # would use shape[0]. Confirm intent; as written qt is almost
                # always 1 and only the first `max_per_batch` samples are used.
                qt = max(visual_features[class_id].shape[1] // max_per_batch, 1)
                for i in range(qt):
                    lo_index = i * max_per_batch
                    hi_index = (i + 1) * max_per_batch
                    real_features = visual_features[class_id][lo_index:hi_index]
                    optimizer.zero_grad()
                    if batch_size is None:
                        # One semantic word per real feature.
                        words = target_to_word([class_id for _ in range(len(real_features))]
                                              ).to(devices[0])
                    else:
                        words = target_to_word([class_id for _ in range(batch_size)]).to(devices[0])
                    semantic_features = word_embeddings(words)
                    loss = loss_function(real_features, semantic_features, class_id, words, metrics)
                    loss.backward()
                    optimizer.step()
                    class_loss += loss.item()
                metrics["loss"] += class_loss / qt
                _print_metrics(metrics, prog_bar, epoch, n_epochs, batch_index, task, n_tasks)
            if scheduler:
                if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                    scheduler.step(metrics["gmm_loss"] / batch_index)
                else:
                    scheduler.step(epoch)
    # Targets were accumulated as per-batch lists; flatten them per class.
    for class_id in class_ids:
        visual_targets[class_id] = torch.cat(visual_targets[class_id])
    return torch.cat(list(visual_features.values())), torch.cat(list(visual_targets.values()))
def linear_loop(
    visual_features,
    visual_targets,
    devices,
    n_epochs,
    optimizer,
    loss_function,
    task,
    n_tasks,
    target_to_word,
    word_embeddings=None,
    scheduler=None,
    batch_size=128,
    normalize=False,
    disable_progressbar=False
):
    """Train a linear transformation mapping visual features to word embeddings.

    :param visual_features: Tensor of visual features.
    :param visual_targets: Tensor of class targets matching the features.
    :param normalize: If True, L2-normalize both sides before the loss.
    """
    loader = _get_loader(visual_features, visual_targets, batch_size=batch_size)
    # Actually train the generator
    if n_epochs > 0:
        logger.info("Training the linear transformation...")
    for epoch in range(n_epochs):
        metrics = collections.defaultdict(float)
        prog_bar = tqdm(
            loader,
            ascii=True,
            bar_format="{desc}: {bar} | {percentage:3.0f}%",
            disable=disable_progressbar
        )
        # Bug fix: _get_loader always yields (features, targets, flag)
        # triples; unpacking only (x, y) raised a ValueError. The flag is
        # unused here, so it is discarded.
        for batch_index, (x, y, _) in enumerate(prog_bar, start=1):
            optimizer.zero_grad()
            words = target_to_word(y).to(devices[0])
            semantic_features = word_embeddings(words)
            if normalize:
                loss = loss_function(
                    F.normalize(x, dim=1, p=2), F.normalize(semantic_features, dim=1, p=2)
                )
            else:
                loss = loss_function(x, semantic_features)
            loss.backward()
            optimizer.step()
            metrics["linear_loss"] += loss.item()
            _print_metrics(metrics, prog_bar, epoch, n_epochs, batch_index, task, n_tasks)
        if scheduler:
            if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(metrics["linear_loss"] / batch_index)
            else:
                scheduler.step(epoch)
def adv_autoencoder_loop(
    inc_dataset,
    class_ids,
    devices,
    n_epochs,
    task,
    n_tasks,
    target_to_word,
    network=None,
    batch_size=128,
    autoencoder=None,
    preprocessing=None,
    memory_class_ids=None,
    memory=None,
    lr=0.0002,
    disable_progressbar=False,
    features_key="raw_features"
):
    """Train an adversarial auto-encoder on extracted visual features.

    Alternates three updates per batch: (1) encoder+decoder on the MSE
    reconstruction, (2) discriminator on real prior noise vs. predicted
    codes, (3) decoder (generator) to fool the discriminator.

    :return: Tuple (all features concatenated, all targets concatenated).
    """
    if len(devices) > 1:
        logger.info("Duplicating model on {} gpus.".format(len(devices)))
        training_network = nn.DataParallel(network, devices)
    else:
        training_network = network
    visual_features, visual_targets = _extract_features(
        class_ids,
        training_network,
        inc_dataset,
        devices[0],
        memory_class_ids=memory_class_ids,
        memory=memory,
        disable_progressbar=disable_progressbar,
        features_key=features_key
    )
    visual_features = torch.cat(list(visual_features.values()))
    for class_id in class_ids:
        visual_targets[class_id] = torch.cat(visual_targets[class_id])
    visual_targets = torch.cat(list(visual_targets.values()))
    loader = _get_loader(visual_features, visual_targets, batch_size=batch_size)
    if preprocessing is not None:
        # Preprocessing is not supported here; fail loudly if requested.
        assert False
        preprocessing.fit_transform(visual_features)
    # Separate optimizers so each sub-network can be stepped independently.
    optimizer_e = torch.optim.Adam(autoencoder.encoder.parameters(), lr=lr)
    optimizer_d = torch.optim.Adam(autoencoder.discriminator.parameters(), lr=lr)
    optimizer_g = torch.optim.Adam(autoencoder.decoder.parameters(), lr=lr)
    for epoch in range(n_epochs):
        metrics = collections.defaultdict(float)
        prog_bar = tqdm(
            loader,
            ascii=True,
            bar_format="{desc}: {bar} | {percentage:3.0f}%",
            disable=disable_progressbar
        )
        # Bug fix: _get_loader always yields (features, targets, flag)
        # triples; unpacking only (x, y) raised a ValueError.
        for batch_index, (x, y, _) in enumerate(prog_bar, start=1):
            optimizer_e.zero_grad()
            optimizer_d.zero_grad()
            optimizer_g.zero_grad()
            words = target_to_word(y).to(devices[0])
            attributes = autoencoder.emb(words)
            noise = autoencoder.get_noise(len(x))
            # Training encoder-decoder:
            pred_noise = autoencoder.encoder(x)
            reconstruction = autoencoder.decoder(torch.cat((attributes, pred_noise), dim=-1))
            mse_loss = F.mse_loss(reconstruction, x)
            mse_loss.backward()
            optimizer_e.step()
            optimizer_g.step()
            # Training discriminator:
            autoencoder.encoder.eval()
            pred_noise = autoencoder.encoder(x)
            fake_dis = autoencoder.discriminator(pred_noise)
            true_dis = autoencoder.discriminator(noise)
            dis_true_loss = F.binary_cross_entropy_with_logits(
                true_dis,
                torch.ones_like(true_dis).to(devices[0])
            )
            dis_fake_loss = F.binary_cross_entropy_with_logits(
                fake_dis,
                torch.zeros_like(fake_dis).to(devices[0])
            )
            dis_loss = dis_true_loss + dis_fake_loss
            dis_loss.backward()
            optimizer_d.step()
            # Training generator:
            optimizer_g.zero_grad()
            autoencoder.encoder.train()
            pred_noise = autoencoder.encoder(x)
            fake_dis = autoencoder.discriminator(pred_noise)
            gen_loss = F.binary_cross_entropy_with_logits(
                fake_dis,
                torch.ones_like(fake_dis).to(devices[0])
            )
            gen_loss.backward()
            optimizer_g.step()
            metrics["rec"] += mse_loss.item()
            metrics["dis"] += dis_loss.item()
            metrics["gen"] += gen_loss.item()
            _print_metrics(metrics, prog_bar, epoch, n_epochs, batch_index, task, n_tasks)
    return visual_features, visual_targets
def features_to_classifier_loop(
    features,
    targets,
    flags,
    epochs,
    optimizer,
    classifier,
    loss_function,
    scheduler=None,
    disable_progressbar=False
):
    """Train a classifier head on pre-extracted features.

    :param features: Tensor of input features.
    :param targets: Tensor of class targets.
    :param flags: Per-sample flags forwarded to the loss function.
    :param loss_function: Callable (logits, targets, flags, metrics) -> loss.
    """
    loader = _get_loader(features, targets, flags)
    for epoch in range(epochs):
        metrics = collections.defaultdict(float)
        prog_bar = tqdm(
            loader,
            ascii=True,
            bar_format="{desc}: {bar} | {percentage:3.0f}%",
            disable=disable_progressbar
        )
        for batch_index, (x, y, f) in enumerate(prog_bar, start=1):
            optimizer.zero_grad()
            logits = classifier(x)["logits"]
            loss = loss_function(logits, y, f, metrics)
            loss.backward()
            optimizer.step()
            metrics["loss"] += loss.item()
            _print_metrics(metrics, prog_bar, epoch, epochs, batch_index, 0, 1)
        if scheduler:
            if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(metrics["loss"] / batch_index)
            else:
                scheduler.step(epoch)
def online_generation(
    features,
    targets,
    epochs,
    optimizer,
    classifier,
    loss_function,
    word_embeddings,
    target_to_word,
    unseen_class_ids=None,
    scheduler=None,
    unseen_amount=100,
    disable_progressbar=False
):
    """Train a classifier on real features plus features generated on the fly
    for unseen classes from their word embeddings.

    :param unseen_class_ids: Classes with no real features; `unseen_amount`
        fake features are generated per class at every epoch.
    """
    word_embeddings.eval()
    for epoch in range(epochs):
        metrics = collections.defaultdict(float)
        # Re-generate fresh fake features for the unseen classes each epoch.
        fake_features, fake_targets = [], []
        for class_id in unseen_class_ids:
            class_ids = [class_id for _ in range(unseen_amount)]
            words = target_to_word(class_ids).to(word_embeddings.device)
            with torch.no_grad():
                fake_features.append(word_embeddings(words))
            fake_targets.append(torch.tensor(class_ids).to(word_embeddings.device))
        fake_features = torch.cat(fake_features)
        fake_targets = torch.cat(fake_targets)
        loader = _get_loader(
            torch.cat((features, fake_features), dim=0),
            torch.cat((targets.to(word_embeddings.device), fake_targets), dim=0)
        )
        prog_bar = tqdm(
            loader,
            ascii=True,
            bar_format="{desc}: {bar} | {percentage:3.0f}%",
            disable=disable_progressbar
        )
        # Bug fix: _get_loader always yields (features, targets, flag)
        # triples; unpacking only (x, y) raised a ValueError.
        for batch_index, (x, y, _) in enumerate(prog_bar, start=1):
            optimizer.zero_grad()
            logits = classifier(x)["logits"]
            loss = loss_function(logits, y)
            loss.backward()
            optimizer.step()
            metrics["loss"] += loss.item()
            _print_metrics(metrics, prog_bar, epoch, epochs, batch_index, 0, 1)
        if scheduler:
            if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(metrics["loss"] / batch_index)
            else:
                scheduler.step(epoch)
    word_embeddings.train()
# ---------
# Utilities
# ---------
def _extract_features(
    class_ids,
    training_network,
    inc_dataset,
    device=None,
    memory_class_ids=None,
    memory=None,
    disable_progressbar=False,
    features_key="raw_features"
):
    """Extract features for every given class, and keep them in GPU memory for
    faster loading.
    :param class_ids: List of classes to extract.
    :param training_network: The network used to extract.
    :param inc_dataset: The incremental dataset needed to fetch data.
    :param device: A potential GPU device.
    :param memory_class_ids: List of old classes that belong to memory.
    :param memory: A tuple of (data_x, data_y), in numpy format.
    :param disable_progressbar: Hide progress bar, useful for gridsearch.
    :return: A tuple of both dict class_id->features and targets.
    """
    if memory_class_ids is None:
        memory_class_ids = []  # Avoid a mutable default argument.
    # Extract features
    visual_features = collections.defaultdict(list)
    visual_targets = collections.defaultdict(list)
    logger.info("Computing class features...")
    prog_bar = tqdm(
        class_ids, ascii=True, bar_format="{bar} | {percentage:3.0f}%", disable=disable_progressbar
    )
    for index, class_id in enumerate(prog_bar, start=1):
        if class_id in memory_class_ids:
            # We cannot extract all features, the class is "old", and thus only
            # memory data is available for it.
            class_memory = _select_memory(memory, class_id)
            loader_args = [[]]
            loader_kwargs = {"memory": class_memory, "mode": "test", "data_source": "train"}
        else:
            # New class, thus everything can be used, enjoy!
            loader_args = [[class_id]]
            loader_kwargs = {"mode": "test", "data_source": "train"}
        loader_base = inc_dataset.get_custom_loader(*loader_args, **loader_kwargs)[1]
        # Second pass with mode="flip" so flipped versions are extracted too.
        loader_kwargs["mode"] = "flip"
        loader_flip = inc_dataset.get_custom_loader(*loader_args, **loader_kwargs)[1]
        for loader in (loader_base, loader_flip):
            for input_dict in loader:
                inputs, targets = input_dict["inputs"], input_dict["targets"]
                with torch.no_grad():
                    outputs = training_network(inputs.to(device))
                visual_features[class_id].append(outputs[features_key])
                visual_targets[class_id].append(targets)
        visual_features[class_id] = torch.cat(visual_features[class_id])
        # NOTE: targets stay as a list of per-batch tensors; callers
        # concatenate them afterwards.
    return visual_features, visual_targets
def _select_memory(memory, class_id):
mem_x, mem_y = memory
indexes = np.where(mem_y == class_id)[0]
return mem_x[indexes], mem_y[indexes]
def _get_loader(features, targets, flags=None, batch_size=128):
class Dataset(torch.utils.data.Dataset):
def __init__(self, features, targets, flags=None):
self.features = features
self.targets = targets
self.flags = flags
def __len__(self):
return self.features.shape[0]
def __getitem__(self, index):
f, t = self.features[index], self.targets[index]
if self.flags is None:
return f, t, 1
return f, t, self.flags[index]
dataset = Dataset(features, targets, flags)
return torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=batch_size)
| 15,274 | 30.690871 | 99 | py |
AFC | AFC-master/inclearn/lib/loops/loops.py | import collections
import logging
import torch
from torch import nn
from inclearn.lib.network import hook
from tqdm import tqdm
logger = logging.getLogger(__name__)
def single_loop(
    train_loader,
    val_loader,
    devices,
    network,
    n_epochs,
    optimizer,
    train_function,
    eval_function,
    task,
    n_tasks,
    scheduler=None,
    disable_progressbar=False,
    eval_every_x_epochs=None,
    config=None,
    early_stopping=None
):
    """Generic single-task training loop with optional periodic validation.

    :param train_function: Callable computing the loss for one batch; may
        return a plain float to signal "skip this batch".
    :param eval_function: Callable (network, loader) -> accuracy, used when
        `eval_every_x_epochs` is set.
    :param early_stopping: Optional dict with a "patience" entry.
    """
    best_epoch, best_acc = -1, -1.
    wait = 0
    grad, act = None, None
    if len(devices) > 1:
        logger.info("Duplicating model on {} gpus.".format(len(devices)))
        training_network = nn.DataParallel(network, devices)
        if network.gradcam_hook:
            # Re-register Grad-CAM hooks on the DataParallel wrapper.
            logger.info("Adding hook on multi-gpu model.")
            grad, act, back_hook, for_hook = hook.get_gradcam_hook(training_network)
            training_network.module.convnet.last_conv.register_backward_hook(back_hook)
            training_network.module.convnet.last_conv.register_forward_hook(for_hook)
    else:
        training_network = network
    for epoch in range(n_epochs):
        metrics = collections.defaultdict(float)
        prog_bar = tqdm(
            train_loader,
            disable=disable_progressbar,
            ascii=True,
            bar_format="{desc}: {percentage:3.0f}% | {n_fmt}/{total_fmt} | {rate_fmt}{postfix}"
        )
        for batch_index, input_dict in enumerate(prog_bar, start=1):
            inputs, targets = input_dict["inputs"], input_dict["targets"]
            memory_flags = input_dict["memory_flags"]
            if grad is not None:
                # Clear hook storage so each batch sees fresh Grad-CAM data.
                _clean_list(grad)
                _clean_list(act)
            optimizer.zero_grad()
            loss = train_function(
                training_network,
                inputs,
                targets,
                memory_flags,
                metrics,
                epoch=epoch,
                epochs=n_epochs,
                gradcam_grad=grad,
                gradcam_act=act,
                config=config
            )
            if isinstance(loss, float):
                # Not a tensor, because the loss was never computed, probably
                # because of some sample requirements
                continue
            loss.backward()
            optimizer.step()
            _print_metrics(metrics, prog_bar, epoch, n_epochs, batch_index, task, n_tasks)
        if scheduler:
            scheduler.step(epoch)
        if eval_every_x_epochs and epoch != 0 and epoch % eval_every_x_epochs == 0:
            training_network.eval()
            accuracy = eval_function(training_network, val_loader)
            training_network.train()
            logger.info("Val accuracy: {}".format(accuracy))
            if accuracy > best_acc:
                best_epoch = epoch
                best_acc = accuracy
                wait = 0
            else:
                wait += 1
            # NOTE(review): this condition looks inverted — it breaks while
            # `wait` is still BELOW patience (e.g. immediately after the very
            # first evaluation). Classic early stopping would be
            # `wait > early_stopping["patience"]`. Confirm intent.
            if early_stopping and early_stopping["patience"] > wait:
                logger.warning("Early stopping!")
                break
    if eval_every_x_epochs:
        logger.info("Best accuracy reached at epoch {} with {}%.".format(best_epoch, best_acc))
def _print_metrics(metrics, prog_bar, epoch, nb_epochs, nb_batches, task, n_tasks):
failed_metrics = []
for metric_name, metric_value in metrics.items():
if metric_value == float('nan'):
failed_metrics.append(metric_name)
if len(failed_metrics) > 0:
raise Exception(f"Following metrics were NaN: {str(failed_metrics)}!")
pretty_metrics = ", ".join(
"{}: {}".format(metric_name, round(metric_value / nb_batches, 3))
for metric_name, metric_value in metrics.items()
)
prog_bar.set_description(
"T{}/{}, E{}/{} => {}".format(task + 1, n_tasks, epoch + 1, nb_epochs, pretty_metrics)
)
def _clean_list(l):
for i in range(len(l)):
l[i] = None
| 3,951 | 29.4 | 95 | py |
AFC | AFC-master/inclearn/lib/network/postprocessors.py | import torch
from torch import nn
class ConstantScalar(nn.Module):
    """Affine post-processor with non-learned constants: `factor * x + bias`."""

    def __init__(self, constant=1., bias=0., **kwargs):
        super().__init__()
        self.factor = constant
        self.bias = bias

    def on_task_end(self):
        """No-op, kept for API parity with the learned post-processors."""
        pass

    def on_epoch_end(self):
        """No-op, kept for API parity with the learned post-processors."""
        pass

    def forward(self, x):
        # Guard for instances lacking a `bias` attribute (presumably old
        # serialized checkpoints — TODO confirm).
        if hasattr(self, "bias"):
            return self.factor * x + self.bias
        return self.factor * x
class FactorScalar(nn.Module):
    """Learned multiplicative scaling: `factor * x` with a trainable scalar."""

    def __init__(self, initial_value=1., **kwargs):
        super().__init__()
        self.factor = nn.Parameter(torch.tensor(initial_value))

    def on_task_end(self):
        """No-op, kept for API parity with the other post-processors."""
        pass

    def on_epoch_end(self):
        """No-op, kept for API parity with the other post-processors."""
        pass

    def forward(self, inputs):
        return inputs * self.factor

    # Support both `scalar * x` and `x * scalar` syntax on the module itself.
    def __mul__(self, other):
        return self(other)

    def __rmul__(self, other):
        return self(other)
class InvertedFactorScalar(nn.Module):
    """Learned scaling whose effective multiplier is the parameter's inverse.

    The trained parameter is `_factor`; the applied multiplier is
    `1 / (_factor + 1e-7)` (the epsilon avoids division by zero).
    """

    def __init__(self, initial_value=1., **kwargs):
        super().__init__()
        self._factor = nn.Parameter(torch.tensor(initial_value))

    @property
    def factor(self):
        return 1 / (self._factor + 1e-7)

    def on_task_end(self):
        """No-op, kept for API parity with the other post-processors."""
        pass

    def on_epoch_end(self):
        """No-op, kept for API parity with the other post-processors."""
        pass

    def forward(self, inputs):
        return inputs * self.factor

    # Support both `scalar * x` and `x * scalar` syntax on the module itself.
    def __mul__(self, other):
        return self(other)

    def __rmul__(self, other):
        return self(other)
class HeatedUpScalar(nn.Module):
    """Scaling factor that moves linearly from `first_value` to `last_value`.

    The factor advances by one step either at every task end or every epoch
    end, depending on `scope`.
    """

    def __init__(self, first_value, last_value, nb_steps, scope="task", **kwargs):
        super().__init__()
        self.scope = scope
        self.first_value = first_value
        # Step magnitude; the direction is carried by `_factor` below.
        self.step = (max(first_value, last_value) - min(first_value, last_value)) / (nb_steps - 1)
        self._factor = -1 if first_value > last_value else 1
        self._increment = 0
        print("Heated-up factor is {} with {} scope.".format(self.factor, self.scope))

    def on_task_end(self):
        if self.scope == "task":
            self._increment += 1
        print("Heated-up factor is {}.".format(self.factor))

    def on_epoch_end(self):
        if self.scope == "epoch":
            self._increment += 1

    @property
    def factor(self):
        return self.first_value + (self._factor * self._increment * self.step)

    def forward(self, inputs):
        return self.factor * inputs
| 2,436 | 21.357798 | 98 | py |
AFC | AFC-master/inclearn/lib/network/hook.py | import torch
import torch.nn as nn
def get_gradcam_hook(model):
    """Build storage lists plus backward/forward hooks for Grad-CAM.

    For a DataParallel model, one storage slot per device is allocated and
    each hook writes into the slot matching the device that produced its
    output; otherwise a single slot is used.

    :return: Tuple (gradients, activations, backward_hook, forward_hook).
    """
    if isinstance(model, nn.DataParallel):
        n_devices = len(model.device_ids)
        gradients = [None] * n_devices
        activations = [None] * n_devices

        def backward_hook(module, grad_input, grad_output):
            slot = model.device_ids.index(grad_output[0].device.index)
            gradients[slot] = grad_output[0]
            return None

        def forward_hook(module, input, output):
            slot = model.device_ids.index(output.device.index)
            activations[slot] = output
            return None
    else:
        gradients = [None]
        activations = [None]

        def backward_hook(module, grad_input, grad_output):
            gradients[0] = grad_output[0]
            return None

        def forward_hook(module, input, output):
            activations[0] = output
            return None

    return gradients, activations, backward_hook, forward_hook
| 913 | 29.466667 | 91 | py |
AFC | AFC-master/inclearn/lib/network/memory.py | import torch
from torch import nn
from torch.nn import functional as F
class MemoryBank:
    """Simple feature bank with momentum updates.

    Features/targets are appended with :meth:`add` and refreshed in place
    with :meth:`update` using an exponential moving average.
    """

    def __init__(self, device, momentum=0.5):
        self.features = None
        self.targets = None
        self.momentum = momentum
        self.device = device

    def add(self, features, targets):
        """Append new features and targets to the bank (moved to `device`)."""
        if self.features is None:
            self.features = features
            self.targets = targets
        else:
            self.features = torch.cat((self.features, features.to(self.device)), dim=0)
            self.targets = torch.cat((self.targets, targets.to(self.device)), dim=0)

    def get(self, indexes):
        """Return the stored features at the given indexes."""
        return self.features[indexes]

    def get_neg(self, indexes, n=10):
        """Return up to `n` randomly-ordered features NOT at `indexes`.

        NOTE(review): the multinomial below only permutes the first `nb`
        negative rows rather than sampling among all negatives — confirm
        whether that is intended.
        """
        neg_indexes = torch.ones(len(self.features)).bool()
        neg_indexes[indexes] = False
        nb = min(n, len(self.features) - len(indexes))
        rnd_indexes = torch.multinomial(torch.ones(nb), nb)
        return self.features[neg_indexes][rnd_indexes]

    def update(self, features, indexes):
        """Momentum-update the stored features at `indexes`.

        new = momentum * old + (1 - momentum) * incoming
        """
        # Bug fix: the original computed `(1 - self.momentum * features)`,
        # i.e. 1 minus a scaled feature tensor, instead of weighting the
        # incoming features by (1 - momentum).
        self.features[indexes] = (
            self.momentum * self.features[indexes] + (1 - self.momentum) * features
        )
| 1,146 | 29.184211 | 87 | py |
AFC | AFC-master/inclearn/lib/network/calibrators.py | import torch
from torch import nn
class CalibrationWrapper(nn.Module):
    """Wraps several calibration models, each being applied on different targets."""

    def __init__(self):
        super().__init__()
        self.start_indexes = []
        self.end_indexes = []
        self.models = nn.ModuleList([])

    def add_model(self, model, start_index, end_index):
        """Adds a calibration model applied on targets between the two indexes.

        The models must be added in the right targets order!
        """
        self.models.append(model)
        self.start_indexes.append(start_index)
        self.end_indexes.append(end_index)

    def forward(self, inputs):
        pieces = []
        # Logits before the first calibrated range pass through unchanged.
        first_start = self.start_indexes[0]
        if first_start != 0:
            pieces.append(inputs[..., :first_start])
        for model, lo, hi in zip(self.models, self.start_indexes, self.end_indexes):
            pieces.append(model(inputs[..., lo:hi]))
        # Same for logits after the last calibrated range.
        last_end = self.end_indexes[-1]
        if last_end != inputs.shape[1]:
            pieces.append(inputs[..., last_end:])
        return torch.cat(pieces, dim=-1)
class LinearModel(nn.Module):
    """Linear model applying on the logits `alpha * x + beta`.

    By default, this model is initialized as an identity operation.
    See https://arxiv.org/abs/1905.13260 for an example usage.

    :param alpha: A learned scalar.
    :param beta: A learned scalar.
    """

    def __init__(self, alpha=1., beta=0.):
        super().__init__()
        self.alpha = nn.Parameter(torch.tensor(alpha))
        self.beta = nn.Parameter(torch.tensor(beta))

    def forward(self, inputs):
        return inputs * self.alpha + self.beta
class TemperatureScaling(nn.Module):
    """Applies a learned temperature on the logits.

    See https://arxiv.org/abs/1706.04599.
    """

    def __init__(self, temperature=1):
        super().__init__()
        # Bug fix: cast to float before wrapping in a Parameter. With the
        # integer default, torch.tensor(1) is a LongTensor and nn.Parameter
        # raises because only floating-point tensors can require gradients.
        self.temperature = nn.Parameter(torch.tensor(float(temperature)))

    def forward(self, inputs):
        return inputs / self.temperature
| 2,150 | 27.68 | 100 | py |
AFC | AFC-master/inclearn/lib/network/mlp.py | from torch import nn
class MLP(nn.Module):
    """Multi-layer perceptron: Linear(+BatchNorm)+LeakyReLU(+Dropout) hidden
    blocks followed by a final Linear onto `hidden_dims[-1]`.

    All linear weights are drawn from N(0, 0.01) with zero biases.
    """

    def __init__(self, input_dim, hidden_dims, use_bn=True, input_dropout=0., hidden_dropout=0.):
        super().__init__()

        def make_linear(in_dim, out_dim):
            # Every linear layer shares the same init scheme.
            linear = nn.Linear(in_dim, out_dim, bias=True)
            nn.init.normal_(linear.weight, std=0.01)
            nn.init.constant_(linear.bias, 0.)
            return linear

        modules = []
        previous_dim = input_dim
        for index, dim in enumerate(hidden_dims[:-1]):
            modules.append(make_linear(previous_dim, dim))
            # NOTE: `index < len(hidden_dims) - 1` always holds inside this
            # loop (index stops at len - 2); the guards are kept for parity
            # with the original structure.
            if index < len(hidden_dims) - 1:
                if use_bn:
                    modules.append(nn.BatchNorm1d(dim))
                modules.append(nn.LeakyReLU(negative_slope=0.2))
            if input_dropout and index == 0:
                modules.append(nn.Dropout(p=input_dropout))
            elif hidden_dropout and index < len(hidden_dims) - 1:
                modules.append(nn.Dropout(p=hidden_dropout))
            previous_dim = dim
        modules.append(make_linear(previous_dim, hidden_dims[-1]))
        self.mlp = nn.Sequential(*modules)

    def forward(self, x):
        return self.mlp(x)
| 1,129 | 32.235294 | 97 | py |
AFC | AFC-master/inclearn/lib/network/autoencoder.py | import logging
import torch
from torch import nn
from .mlp import MLP
from .word import get_embeddings
logger = logging.getLogger(__name__)
class AdvAutoEncoder(nn.Module):
    """Adversarial auto-encoder generating features conditioned on word embeddings.

    The encoder maps real features to a latent "noise" code; the decoder
    reconstructs features from (class embedding, noise); the discriminator
    learns to tell predicted codes apart from codes drawn from the prior.
    """

    def __init__(
        self,
        dataset,
        embeddings=None,
        encoder_config=None,
        decoder_config=None,
        discriminator_config=None,
        noise_dimension=50,
        noise_type="normal",
        device=None
    ):
        super().__init__()
        self.noise_dimension = noise_dimension
        self.noise_type = noise_type
        self.emb, _ = get_embeddings(dataset, embeddings, True)
        semantic_dim = self.emb.weight.shape[1]
        logger.info(f"Semantic dimension: {semantic_dim}.")
        if encoder_config is None:
            # Without an encoder config, features are used as latent codes.
            self.encoder = identity
        else:
            assert encoder_config["hidden_dims"][-1] == noise_dimension
            self.encoder = MLP(**encoder_config)
        # The decoder consumes the concatenation of (embedding, noise).
        decoder_config["input_dim"] = noise_dimension + semantic_dim
        assert decoder_config["hidden_dims"][-1] == encoder_config["input_dim"]
        self.decoder = MLP(**decoder_config)
        discriminator_config["input_dim"] = noise_dimension
        assert discriminator_config["hidden_dims"][-1] == 1
        self.discriminator = MLP(**discriminator_config)
        self.to(device)
        self.device = device

    def forward(self, words, real_features):
        attributes = self.emb(words)
        pred_noise = self.encoder(real_features)
        noise = self.get_noise(len(real_features))
        # Bug fix: torch.cat takes a *sequence* of tensors; the original
        # passed the two tensors as positional arguments, which raises a
        # TypeError at runtime (compare with `generate` below).
        pred_x = self.decoder(torch.cat((attributes, pred_noise), dim=-1))
        outputs = {"reconstruction": pred_x}
        outputs["fake_dis"] = self.discriminator(pred_noise)
        outputs["true_dis"] = self.discriminator(noise)
        return outputs

    def generate(self, words):
        """Decode features for `words` from freshly sampled prior noise."""
        attributes = self.emb(words)
        noise = self.get_noise(len(words))
        return self.decoder(torch.cat((attributes, noise), dim=-1))

    def get_noise(self, amount):
        """Sample `amount` latent codes from the configured prior."""
        if self.noise_type == "normal":
            return torch.randn(amount, self.noise_dimension).to(self.device)
        elif self.noise_type == "uniform":
            return torch.rand(amount, self.noise_dimension).to(self.device)
        else:
            raise ValueError(f"Unknown noise type {self.noise_type}.")
def identity(x):
    """Return `x` unchanged (used as a no-op encoder)."""
    return x
| 2,431 | 28.658537 | 79 | py |
AFC | AFC-master/inclearn/lib/network/classifiers.py | import copy
import logging
import numpy as np
import torch
from sklearn.cluster import KMeans
from torch import nn
from torch.nn import functional as F
from inclearn.lib import distance as distance_lib
from inclearn.lib import utils
from .postprocessors import FactorScalar, HeatedUpScalar
logger = logging.getLogger(__name__)
class Classifier(nn.Module):
    """Fully-connected classifier whose weights grow task by task.

    Weights (and optional biases) are kept as a ParameterList with one entry
    per task, so old-task weights can be distinguished from the newest ones.
    Optional "negative weights" are extra rows appended during training (and
    optionally during eval) that are not necessarily trainable.
    """
    classifier_type = "fc"
    def __init__(
        self,
        features_dim,
        device,
        *,
        use_bias=False,
        normalize=False,
        init="kaiming",
        train_negative_weights=False,
        **kwargs
    ):
        super().__init__()
        self.features_dim = features_dim
        self.use_bias = use_bias
        self.init_method = init
        self.device = device
        self.normalize = normalize
        # One Parameter per task; concatenated on the fly by `weights`.
        self._weights = nn.ParameterList([])
        self._bias = nn.ParameterList([]) if self.use_bias else None
        self.train_negative_weights = train_negative_weights
        self._negative_weights = None
        self.use_neg_weights = True
        self.eval_negative_weights = False
        self.proxy_per_class = 1
        self.n_classes = 0
    def on_task_end(self):
        pass
    def on_epoch_end(self):
        pass
    @property
    def weights(self):
        # All task weights stacked into a single (n_classes, features_dim) tensor.
        return torch.cat([w for w in self._weights])
    @property
    def new_weights(self):
        # Weights of the most recent task only.
        return self._weights[-1]
    @property
    def old_weights(self):
        # Weights of all previous tasks, or None on the first task.
        if len(self._weights) > 1:
            return self._weights[:-1]
        return None
    @property
    def bias(self):
        if self._bias is not None:
            return torch.cat([b for b in self._bias])
        return None
    @property
    def new_bias(self):
        return self._bias[-1]
    @property
    def old_bias(self):
        if len(self._bias) > 1:
            return self._bias[:-1]
        return None
    def forward(self, features):
        if len(self._weights) == 0:
            raise Exception("Add some classes before training.")
        weights = self.weights
        # Negative weights take part in the logits during training (or during
        # eval when explicitly enabled).
        if self._negative_weights is not None and (
            self.training is True or self.eval_negative_weights
        ) and self.use_neg_weights:
            weights = torch.cat((weights, self._negative_weights), 0)
        if self.normalize:
            features = F.normalize(features, dim=1, p=2)
        logits = F.linear(features, weights, bias=self.bias)
        return {"logits": logits}
    def add_classes(self, n_classes):
        """Append freshly initialized weights (and biases) for `n_classes` new classes."""
        self._weights.append(nn.Parameter(torch.randn(n_classes, self.features_dim)))
        self._init(self.init_method, self.new_weights)
        if self.use_bias:
            self._bias.append(nn.Parameter(torch.randn(n_classes)))
            self._init(0., self.new_bias)
        self.to(self.device)
    def reset_weights(self):
        self._init(self.init_method, self.weights)
    @staticmethod
    def _init(init_method, parameters):
        # A numeric init_method means constant init; "kaiming" otherwise.
        if isinstance(init_method, float) or isinstance(init_method, int):
            nn.init.constant_(parameters, init_method)
        elif init_method == "kaiming":
            nn.init.kaiming_normal_(parameters, nonlinearity="linear")
        else:
            raise NotImplementedError("Unknown initialization method: {}.".format(init_method))
    def align_weights(self):
        """Align new weights based on old weights norm.
        # Reference:
            * Maintaining Discrimination and Fairness in Class Incremental Learning
              Zhao et al. 2019
        """
        with torch.no_grad():
            old_weights = torch.cat([w for w in self.old_weights])
            old_norm = torch.mean(old_weights.norm(dim=1))
            new_norm = torch.mean(self.new_weights.norm(dim=1))
            self._weights[-1] = nn.Parameter((old_norm / new_norm) * self._weights[-1])
    def align_features(self, features):
        """Rescale `features` so their mean norm matches the mean weight norm."""
        avg_weights_norm = self.weights.data.norm(dim=1).mean()
        avg_features_norm = features.data.norm(dim=1).mean()
        features.data = features.data * (avg_weights_norm / avg_features_norm)
        return features
    def add_custom_weights(self, weights, ponderate=None, **kwargs):
        """Append externally computed weights, optionally rescaled.

        :param ponderate: None, "weights_imprinting" (scale by the mean
            existing weight norm) or "align_weights" (match mean norms).
        """
        if isinstance(ponderate, str):
            if ponderate == "weights_imprinting":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                weights = weights * avg_weights_norm
            elif ponderate == "align_weights":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                avg_new_weights_norm = weights.data.norm(dim=1).mean()
                ratio = avg_weights_norm / avg_new_weights_norm
                weights = weights * ratio
            else:
                raise NotImplementedError(f"Unknown ponderation type {ponderate}.")
        self._weights.append(nn.Parameter(weights))
        self.to(self.device)
    def set_negative_weights(self, negative_weights, ponderate=False):
        """Add weights that are used like the usual weights, but aren't actually
        parameters.
        :param negative_weights: Tensor of shape (n_classes * nb_proxy, features_dim)
        :param ponderate: Reponderate the negative weights by the existing weights norm, as done by
                          "Weights Imprinting".
        """
        logger.info("Add negative weights.")
        if isinstance(ponderate, str):
            if ponderate == "weights_imprinting":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                negative_weights = negative_weights * avg_weights_norm
            elif ponderate == "align_weights":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                avg_negative_weights_norm = negative_weights.data.norm(dim=1).mean()
                ratio = avg_weights_norm / avg_negative_weights_norm
                negative_weights = negative_weights * ratio
            elif ponderate == "inv_align_weights":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                avg_negative_weights_norm = negative_weights.data.norm(dim=1).mean()
                ratio = avg_negative_weights_norm / avg_weights_norm
                negative_weights = negative_weights * ratio
            else:
                raise NotImplementedError(f"Unknown ponderation type {ponderate}.")
        # Only wrap in a Parameter when they should actually be trained.
        if self.train_negative_weights:
            self._negative_weights = nn.Parameter(negative_weights)
        else:
            self._negative_weights = negative_weights
class CosineClassifier(nn.Module):
    """Classifier scoring features by (scaled) cosine similarity against
    per-class weight vectors, with optional multiple proxies per class and
    optional "negative"/ghost weights appended during training.

    Note: ``use_bias`` and ``type`` are accepted for configuration
    compatibility but never read by this implementation.
    """
    classifier_type = "cosine"
    def __init__(
        self,
        features_dim,
        device,
        *,
        proxy_per_class=1,
        distance="cosine",
        merging="softmax",
        scaling=1,
        gamma=1.,
        use_bias=False,
        type=None,
        pre_fc=None,
        negative_weights_bias=None,
        train_negative_weights=False,
        eval_negative_weights=False
    ):
        super().__init__()
        self.n_classes = 0
        # One Parameter per incremental task; concatenated by the `weights` property.
        self._weights = nn.ParameterList([])
        self.bias = None
        self.features_dim = features_dim
        self.proxy_per_class = proxy_per_class
        self.device = device
        self.distance = distance
        self.merging = merging
        self.gamma = gamma
        self.negative_weights_bias = negative_weights_bias
        self.train_negative_weights = train_negative_weights
        self.eval_negative_weights = eval_negative_weights
        self._negative_weights = None
        self.use_neg_weights = True
        if isinstance(scaling, int) or isinstance(scaling, float):
            self.scaling = scaling
        else:
            # Any non-numeric value switches to a learnable scalar factor.
            logger.warning("Using inner learned scaling")
            self.scaling = FactorScalar(1.)
        if proxy_per_class > 1:
            logger.info("Using {} proxies per class.".format(proxy_per_class))
        if pre_fc is not None:
            # Optional projection applied before the cosine scoring; it also
            # redefines the effective features_dim.
            self.pre_fc = nn.Sequential(
                nn.ReLU(inplace=True), nn.BatchNorm1d(self.features_dim),
                nn.Linear(self.features_dim, pre_fc)
            )
            self.features_dim = pre_fc
        else:
            self.pre_fc = None
        self._task_idx = 0
    def on_task_end(self):
        # Called by the training loop at each incremental-task boundary.
        self._task_idx += 1
        if isinstance(self.scaling, nn.Module):
            self.scaling.on_task_end()
    def on_epoch_end(self):
        if isinstance(self.scaling, nn.Module):
            self.scaling.on_epoch_end()
    def forward(self, features):
        """Score ``features`` against all class (and enabled negative) weights.

        Returns a dict with "logits" (proxy-merged similarities) and
        "raw_logits" (one column per proxy).
        """
        if hasattr(self, "pre_fc") and self.pre_fc is not None:
            features = self.pre_fc(features)
        weights = self.weights
        # Negative weights take part in the scoring only during training
        # (or at eval time when explicitly enabled).
        if self._negative_weights is not None and (
            self.training is True or self.eval_negative_weights
        ) and self.use_neg_weights:
            weights = torch.cat((weights, self._negative_weights), 0)
        if self.distance == "cosine":
            raw_similarities = distance_lib.cosine_similarity(features, weights)
        elif self.distance == "stable_cosine_distance":
            features = self.scaling * F.normalize(features, p=2, dim=-1)
            weights = self.scaling * F.normalize(weights, p=2, dim=-1)
            raw_similarities = distance_lib.stable_cosine_distance(features, weights)
        elif self.distance == "neg_stable_cosine_distance":
            features = self.scaling * F.normalize(features, p=2, dim=-1)
            weights = self.scaling * F.normalize(weights, p=2, dim=-1)
            raw_similarities = -distance_lib.stable_cosine_distance(features, weights)
        elif self.distance == "prelu_stable_cosine_distance":
            # Same as stable_cosine_distance but with ReLU-ed features.
            features = self.scaling * F.normalize(F.relu(features), p=2, dim=-1)
            weights = self.scaling * F.normalize(weights, p=2, dim=-1)
            raw_similarities = distance_lib.stable_cosine_distance(features, weights)
        elif self.distance == "prelu_neg_stable_cosine_distance":
            features = self.scaling * F.normalize(F.relu(features), p=2, dim=-1)
            weights = self.scaling * F.normalize(weights, p=2, dim=-1)
            raw_similarities = -distance_lib.stable_cosine_distance(features, weights)
        else:
            raise NotImplementedError("Unknown distance function {}.".format(self.distance))
        if self.proxy_per_class > 1:
            similarities = self._reduce_proxies(raw_similarities)
        else:
            similarities = raw_similarities
        # Optionally bias/clamp the logits of the negative weights, which
        # occupy the last `qt` columns.
        if self._negative_weights is not None and self.negative_weights_bias is not None and\
           self.training is True:
            qt = self._negative_weights.shape[0]
            if isinstance(self.negative_weights_bias, float):
                # Subtract a constant margin, clamped at zero.
                similarities[..., -qt:] = torch.clamp(
                    similarities[..., -qt:] - self.negative_weights_bias, min=0
                )
            elif isinstance(
                self.negative_weights_bias, str
            ) and self.negative_weights_bias == "min":
                # Cap negative logits at the minimum real-class logit.
                min_simi = similarities[..., :-qt].min(dim=1, keepdim=True)[0]
                similarities = torch.min(
                    similarities,
                    torch.cat((similarities[..., :-qt], min_simi.repeat(1, qt)), dim=1)
                )
            elif isinstance(
                self.negative_weights_bias, str
            ) and self.negative_weights_bias == "max":
                # Cap negative logits just below the maximum real-class logit.
                max_simi = similarities[..., :-qt].max(dim=1, keepdim=True)[0] - 1e-6
                similarities = torch.min(
                    similarities,
                    torch.cat((similarities[..., :-qt], max_simi.repeat(1, qt)), dim=1)
                )
            elif isinstance(self.negative_weights_bias,
                            str) and self.negative_weights_bias.startswith("top_"):
                # Zero out all but the top-k negative logits.
                topk = int(self.negative_weights_bias.replace("top_", ""))
                botk = min(qt - topk, qt)
                indexes = (-similarities[..., -qt:]).topk(botk, dim=1)[1]
                similarities[..., -qt:].scatter_(1, indexes, 0.)
            else:
                raise NotImplementedError(f"Unknown {self.negative_weights_bias}.")
        return {"logits": similarities, "raw_logits": raw_similarities}
    def _reduce_proxies(self, similarities):
        """Merge per-proxy similarities into one logit per class."""
        # shape (batch_size, n_classes * proxy_per_class)
        n_classes = similarities.shape[1] / self.proxy_per_class
        assert n_classes.is_integer(), (similarities.shape[1], self.proxy_per_class)
        n_classes = int(n_classes)
        bs = similarities.shape[0]
        if self.merging == "mean":
            return similarities.view(bs, n_classes, self.proxy_per_class).mean(-1)
        elif self.merging == "softmax":
            # Attention-weighted mean over the proxies of each class.
            simi_per_class = similarities.view(bs, n_classes, self.proxy_per_class)
            attentions = F.softmax(self.gamma * simi_per_class, dim=-1)  # shouldn't be -gamma?
            return (simi_per_class * attentions).sum(-1)
        elif self.merging == "max":
            return similarities.view(bs, n_classes, self.proxy_per_class).max(-1)[0]
        elif self.merging == "min":
            return similarities.view(bs, n_classes, self.proxy_per_class).min(-1)[0]
        else:
            raise ValueError("Unknown merging for multiple centers: {}.".format(self.merging))
    # ------------------
    # Weights management
    # ------------------
    def align_features(self, features):
        """Rescale ``features`` in-place so their mean norm matches the mean
        weight norm; returns the same tensor."""
        avg_weights_norm = self.weights.data.norm(dim=1).mean()
        avg_features_norm = features.data.norm(dim=1).mean()
        features.data = features.data * (avg_weights_norm / avg_features_norm)
        return features
    def add_custom_weights(self, weights, ponderate=None, **kwargs):
        """Append externally-computed weights, optionally rescaled to the
        existing weights' mean norm."""
        if isinstance(ponderate, str):
            if ponderate == "weights_imprinting":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                weights = weights * avg_weights_norm
            elif ponderate == "align_weights":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                avg_new_weights_norm = weights.data.norm(dim=1).mean()
                ratio = avg_weights_norm / avg_new_weights_norm
                weights = weights * ratio
            else:
                raise NotImplementedError(f"Unknown ponderation type {ponderate}.")
        self._weights.append(nn.Parameter(weights))
        self.to(self.device)
    def align_weights(self):
        """Align new weights based on old weights norm.
        # Reference:
            * Maintaining Discrimination and Fairness in Class Incremental Learning
              Zhao et al. 2019
        """
        if len(self._weights) == 1:
            return
        with torch.no_grad():
            old_weights = torch.cat([w for w in self.old_weights])
            old_norm = torch.mean(old_weights.norm(dim=1))
            new_norm = torch.mean(self.new_weights.norm(dim=1))
            self._weights[-1] = nn.Parameter((old_norm / new_norm) * self._weights[-1])
    def align_weights_i_to_j(self, indexes_i, indexes_j):
        """Rescale the weights at ``indexes_j`` to the mean norm of the
        weights at ``indexes_i``."""
        with torch.no_grad():
            base_weights = self.weights[indexes_i]
            old_norm = torch.mean(base_weights.norm(dim=1))
            new_norm = torch.mean(self.weights[indexes_j].norm(dim=1))
            self.weights[indexes_j] = nn.Parameter((old_norm / new_norm) * self.weights[indexes_j])
    def align_inv_weights(self):
        """Inverse alignment: rescale new weights by new_norm / old_norm.
        # Reference:
            * Maintaining Discrimination and Fairness in Class Incremental Learning
              Zhao et al. 2019
        """
        with torch.no_grad():
            old_weights = torch.cat([w for w in self.old_weights])
            old_norm = torch.mean(old_weights.norm(dim=1))
            new_norm = torch.mean(self.new_weights.norm(dim=1))
            self._weights[-1] = nn.Parameter((new_norm / old_norm) * self._weights[-1])
    @property
    def weights(self):
        # All task weights concatenated into one (n_total_proxies, dim) tensor.
        return torch.cat([clf for clf in self._weights])
    @property
    def new_weights(self):
        # Weights of the latest task only.
        return self._weights[-1]
    @property
    def old_weights(self):
        # Weights of all previous tasks (ParameterList slice), or None.
        if len(self._weights) > 1:
            return self._weights[:-1]
        return None
    def add_classes(self, n_classes):
        """Add ``n_classes`` classes (times proxy_per_class rows) with
        Kaiming-initialized weights; returns self."""
        new_weights = nn.Parameter(torch.zeros(self.proxy_per_class * n_classes, self.features_dim))
        nn.init.kaiming_normal_(new_weights, nonlinearity="linear")
        self._weights.append(new_weights)
        self.to(self.device)
        self.n_classes += n_classes
        return self
    def add_imprinted_classes(
        self, class_indexes, inc_dataset, network, multi_class_diff="normal", type=None
    ):
        """Add new classes whose weights are imprinted from the mean normalized
        features extracted by ``network`` on each class's data.

        With several proxies per class, proxies are differentiated either by
        Gaussian sampling around the mean ("normal") or k-means ("kmeans").
        """
        if self.proxy_per_class > 1:
            logger.info("Multi class diff {}.".format(multi_class_diff))
        weights_norm = self.weights.data.norm(dim=1, keepdim=True)
        avg_weights_norm = torch.mean(weights_norm, dim=0).cpu()
        new_weights = []
        for class_index in class_indexes:
            _, loader = inc_dataset.get_custom_loader([class_index])
            features, _ = utils.extract_features(network, loader)
            features_normalized = F.normalize(torch.from_numpy(features), p=2, dim=1)
            class_embeddings = torch.mean(features_normalized, dim=0)
            class_embeddings = F.normalize(class_embeddings, dim=0, p=2)
            if self.proxy_per_class == 1:
                new_weights.append(class_embeddings * avg_weights_norm)
            else:
                if multi_class_diff == "normal":
                    std = torch.std(features_normalized, dim=0)
                    for _ in range(self.proxy_per_class):
                        new_weights.append(torch.normal(class_embeddings, std) * avg_weights_norm)
                elif multi_class_diff == "kmeans":
                    clusterizer = KMeans(n_clusters=self.proxy_per_class)
                    clusterizer.fit(features_normalized.numpy())
                    for center in clusterizer.cluster_centers_:
                        new_weights.append(torch.tensor(center) * avg_weights_norm)
                else:
                    raise ValueError(
                        "Unknown multi class differentiation for imprinted weights: {}.".
                        format(multi_class_diff)
                    )
        new_weights = torch.stack(new_weights)
        self._weights.append(nn.Parameter(new_weights))
        self.to(self.device)
        self.n_classes += len(class_indexes)
        return self
    def set_negative_weights(self, negative_weights, ponderate=False):
        """Add weights that are used like the usual weights, but aren't actually
        parameters.
        :param negative_weights: Tensor of shape (n_classes * nb_proxy, features_dim)
        :param ponderate: Reponderate the negative weights by the existing weights norm, as done by
                          "Weights Imprinting".
        """
        logger.info("Add negative weights.")
        if isinstance(ponderate, str):
            if ponderate == "weights_imprinting":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                negative_weights = negative_weights * avg_weights_norm
            elif ponderate == "align_weights":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                avg_negative_weights_norm = negative_weights.data.norm(dim=1).mean()
                ratio = avg_weights_norm / avg_negative_weights_norm
                negative_weights = negative_weights * ratio
            elif ponderate == "inv_align_weights":
                avg_weights_norm = self.weights.data.norm(dim=1).mean()
                avg_negative_weights_norm = negative_weights.data.norm(dim=1).mean()
                ratio = avg_negative_weights_norm / avg_weights_norm
                negative_weights = negative_weights * ratio
            else:
                raise NotImplementedError(f"Unknown ponderation type {ponderate}.")
        # Trainable only when explicitly requested.
        if self.train_negative_weights:
            self._negative_weights = nn.Parameter(negative_weights)
        else:
            self._negative_weights = negative_weights
class MCCosineClassifier(CosineClassifier):
    """CosineClassifier with MC-Dropout.

    During evaluation, logits are averaged over ``nb_samples`` stochastic
    forward passes with dropout kept active, and a var-ratio uncertainty
    estimate is returned alongside.
    """
    def __init__(self, *args, dropout=0.2, nb_samples=10, **kwargs):
        super().__init__(*args, **kwargs)
        self._dropout = dropout  # dropout probability applied to input features
        self.nb_samples = nb_samples  # number of MC samples at eval time
    def forward(self, x):
        if self.training:
            # Single stochastic pass during training.
            return super().forward(F.dropout(x, p=self._dropout))
        sampled_similarities = torch.zeros(x.shape[0], self.nb_samples,
                                           self.n_classes).to(x.device).float()
        for i in range(self.nb_samples):
            similarities = super().forward(F.dropout(x, p=self._dropout))["logits"]
            sampled_similarities[:, i] = similarities
        return {
            "logits": sampled_similarities.mean(dim=1),
            "var_ratio": self.var_ratio(sampled_similarities)
        }
    def var_ratio(self, sampled_similarities):
        """Return 1 - (mode frequency / nb_samples) of the per-sample argmax.

        NOTE(review): ``np.histogram(..., range=(0, 10))`` hard-codes 10 bins,
        which only matches n_classes == 10 — confirm for larger class counts.
        """
        predicted_class = sampled_similarities.max(dim=2)[1].cpu().numpy()
        hist = np.array(
            [
                np.histogram(predicted_class[i, :], range=(0, 10))[0]
                for i in range(predicted_class.shape[0])
            ]
        )
        return 1. - hist.max(axis=1) / self.nb_samples
class CosineM2KDClassifier(CosineClassifier):
    """Cosine classifier with an auxiliary set of weights operating on the
    penultimate residual block's (flattened) output.

    Only works on ResNet34-rebuffi with nf=16, whose penultimate block
    yields 64 * 8 * 8 features.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._auxilliary_weights = nn.ParameterList([])
        # Hard coded penultimate residual block output size.
        self.auxilliary_features_dim = 64 * 8 * 8
    def add_imprinted_classes(self, class_indexes, *args, **kwargs):
        super().add_imprinted_classes(class_indexes, *args, **kwargs)
        self.add_classes_to_auxilliary(len(class_indexes))
        return self  # Consistent with the parent, which returns self.
    def add_classes_to_auxilliary(self, n_classes):
        """Add ``n_classes`` Kaiming-initialized auxiliary weight rows."""
        new_weights = nn.Parameter(torch.zeros(n_classes, self.auxilliary_features_dim))
        nn.init.kaiming_normal_(new_weights, nonlinearity="linear")
        self._auxilliary_weights.append(new_weights)
        self.to(self.device)
        return self
    @property
    def auxilliary_weights(self):
        # Bug fix: previously concatenated the *main* weights (self._weights)
        # instead of the auxiliary ones.
        return torch.cat([clf for clf in self._auxilliary_weights])
    @property
    def new_weights(self):
        # NOTE(review): the two tensors have different feature dimensions
        # (features_dim vs 64*8*8); confirm the intended cat dimension with
        # callers before relying on this.
        return torch.cat([self._weights[-1], self._auxilliary_weights[-1]])
    @property
    def old_weights(self):
        if len(self._weights) > 1:
            # Bug fix: torch.cat cannot consume ParameterList slices directly;
            # concatenate the underlying tensors instead.
            return torch.cat(list(self._weights[:-1]) + list(self._auxilliary_weights[:-1]))
        return None
class DomainClassifier(nn.Module):
    """Single-logit domain discriminator fed through a gradient-reversal op,
    so that the backbone is trained adversarially against it."""
    def __init__(self, features_dim, device=None):
        super().__init__()
        self.features_dim = features_dim
        self.device = device
        self.gradreverse = GradReverse.apply
        self.linear = nn.Linear(features_dim, 1)
        self.to(device)
    def forward(self, x):
        reversed_features = self.gradreverse(x)
        return self.linear(reversed_features)
class GradReverse(torch.autograd.Function):
    """Identity in the forward pass, gradient negation in the backward pass.

    Classic gradient-reversal layer for adversarial domain training.
    """
    @staticmethod
    def forward(ctx, inputs):
        # view_as returns a fresh view so autograd records the operation.
        return inputs.view_as(inputs)
    @staticmethod
    def backward(ctx, grad_output):
        return -grad_output
class BinaryCosineClassifier(nn.Module):
    """Single-output cosine head: the logit is the cosine similarity between
    the L2-normalized input and one learned weight vector."""
    def __init__(self, features_dim):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(1, features_dim))
        nn.init.kaiming_normal_(self.weight, nonlinearity="linear")
    def forward(self, x):
        normalized_inputs = F.normalize(x, dim=1, p=2)
        normalized_weight = F.normalize(self.weight, dim=1, p=2)
        return {"logits": torch.mm(normalized_inputs, normalized_weight.T)}
| 23,762 | 35.671296 | 100 | py |
AFC | AFC-master/inclearn/lib/network/word.py | import logging
import os
import pickle
import numpy as np
import torch
from scipy.io import loadmat
from torch import nn
from torch.nn import functional as F
import gensim
from inclearn.lib.data import fetch_word_embeddings
from .mlp import MLP
logger = logging.getLogger(__name__)
class Word2vec(nn.Module):
def __init__(
self,
embeddings="googlenews",
dataset="cifar100",
mlp_dims=None,
use_bn=True,
input_dropout=0.2,
hidden_dropout=0.5,
device=None,
noise_dimension=50,
noise_type="normal",
freeze_embedding=True,
scale_embedding=None,
data_path=None
):
super().__init__()
self.emb, _ = get_embeddings(dataset, embeddings, frozen=freeze_embedding, path=data_path)
if isinstance(scale_embedding, list):
logger.info(f"Scaling semantic embedding in {scale_embedding}.")
self.emb.weight.data = Scaler(scale_embedding).fit_transform(self.emb.weight.data)
elif isinstance(scale_embedding, str) and scale_embedding == "l2":
self.emb.weight.data = F.normalize(self.emb.weight.data, dim=-1, p=2)
semantic_dim = self.emb.weight.shape[1]
logger.info(f"Semantic dimension: {semantic_dim}.")
if mlp_dims is not None:
self.mlp = MLP(
input_dim=noise_dimension + semantic_dim,
hidden_dims=mlp_dims,
use_bn=use_bn,
input_dropout=input_dropout,
hidden_dropout=hidden_dropout
)
else:
self.mlp = None
self.noise_dimension = noise_dimension
self.noise_type = noise_type
self.to(device)
self.device = device
self.out_dim = mlp_dims[-1]
self.linear_transform = None
def add_linear_transform(self, bias=False):
self.linear_transform = nn.Linear(self.out_dim, self.out_dim, bias=bias)
#self.linear_transform.weight.data = torch.eye(self.out_dim)
#self.linear_transform.weight.data += torch.empty(self.out_dim, self.out_dim).normal_(mean=0, std=0.1)
if bias:
self.linear_transform.bias.data.fill_(0.)
self.linear_transform.to(self.device)
def forward(self, x, only_word=False):
word = self.emb(x)
if only_word:
return word
if self.noise_dimension:
if self.noise_type == "normal":
noise = torch.randn(len(x), self.noise_dimension).to(word.device)
elif self.noise_type == "uniform":
noise = torch.rand(len(x), self.noise_dimension).to(word.device)
else:
raise ValueError(f"Unknown noise type {self.noise_type}.")
if self.mlp:
fake_features = self.mlp(torch.cat((word, noise), dim=-1))
if self.linear_transform:
fake_features = self.linear_transform(fake_features)
return fake_features
return word
def get_embeddings(dataset, embeddings, path=None, frozen=True):
    """Build an ``nn.Embedding`` holding the semantic vectors of ``dataset``.

    :param dataset: Name of the semantic source (attributes or word2vec dump).
    :param embeddings: Word2vec flavor, only used for the cifar100 case.
    :param path: Root folder containing the semantic data files.
    :param frozen: If True, the embedding weights are not trainable.
    :return: Tuple (embedding module, class labels or None).
    """
    # The four word2vec dumps only differ by their file stem.
    w2v_stems = {
        "w2v_wiki_imagenet100ucir_300d": "word2vec_wiki_imagenet100ucir_300d",
        "w2v_wiki_imagenet100_300d": "word2vec_wiki_imagenet100_300d",
        "w2v_wiki_imagenet100ucir_500d": "word2vec_wiki_imagenet100ucir_500d",
        "w2v_wiki_imagenet100_500d": "word2vec_wiki_imagenet100_500d",
    }
    if dataset == "cifar100":
        weights, labels = _get_cifar100_embeddings(embeddings, path)
    elif dataset == "awa2_attributes":
        weights, labels = _get_awa2_attributes(path)
    elif dataset == "awa2_attributes_mat":
        weights, labels = _get_awa2_attributes_mat(path)
    elif dataset == "cub200_attributes":
        weights, labels = _get_cub200_attributes(path)
    elif dataset == "cub200_attributes_mat":
        weights, labels = _get_cub200_attributes_mat(path)
    elif dataset == "apy_attributes_mat":
        weights, labels = _get_apy_attributes_mat(path)
    elif dataset == "lad_attributes":
        weights, labels = _get_lad_attributes(path)
    elif dataset in w2v_stems:
        stem = w2v_stems[dataset]
        weights = np.load(os.path.join(path, stem + ".npy"))
        labels = np.load(os.path.join(path, stem + "_labels.npy"))
    else:
        raise ValueError("Unknown dataset {} for word embeddings.".format(dataset))
    emb = torch.nn.Embedding(num_embeddings=weights.shape[0], embedding_dim=weights.shape[1])
    emb.weight = torch.nn.Parameter(torch.FloatTensor(weights), requires_grad=not frozen)
    return emb, labels
# Prepare embeddings for dataset
def _get_cifar100_embeddings(embeddings_type, data_path=None):
    """Load word2vec vectors for the 100 CIFAR-100 fine label names."""
    if data_path is None:
        data_path = "/data/douillard/"
    emb_path = fetch_word_embeddings(data_path, embeddings_type)
    gensim_model = gensim.models.KeyedVectors.load_word2vec_format(emb_path, binary=True)
    with open(os.path.join(data_path, "cifar-100-python/meta"), "rb") as f:
        labels = pickle.load(f)["fine_label_names"]
    # Multi-word CIFAR labels are not in the word2vec vocabulary; map them
    # to close single-word synonyms.
    fixed_labels = {
        "aquarium_fish": "fish",
        "lawn_mower": "lawnmower",
        "maple_tree": "maple",
        "oak_tree": "oak",
        "palm_tree": "palm",
        "pickup_truck": "pickup",
        "pine_tree": "pine",
        "sweet_pepper": "pepper",
        "willow_tree": "willow"
    }
    for missing, replacement in fixed_labels.items():
        labels[labels.index(missing)] = replacement
    trimmed_weights = np.empty((len(labels), gensim_model.vectors.shape[1]), dtype=np.float32)
    for i, label in enumerate(labels):
        trimmed_weights[i] = gensim_model.vectors[gensim_model.index2word.index(label)]
    return trimmed_weights, labels
def _get_awa2_attributes(path=None):
    """Load the L2-normalized continuous attribute matrix of AwA2."""
    if path is None:
        path = "/data/douillard/"
    attributes = np.loadtxt(
        os.path.join(path, "awa2/Animals_with_Attributes2/predicate-matrix-continuous.txt")
    )
    attributes /= np.linalg.norm(attributes, axis=1, keepdims=True)
    with open(os.path.join(path, "awa2/Animals_with_Attributes2/classes.txt")) as f:
        labels = [line.strip() for line in f]
    return attributes, labels
def _get_awa2_attributes_mat(path=None):
    """Load AwA2 attributes from the xlsa17 zero-shot split .mat file."""
    if path is None:
        path = "/data/douillard/"
    mat = loadmat(os.path.join(path, "zeroshot_split/xlsa17/data/AWA2/att_splits.mat"))
    attributes = mat["att"].T
    with open(os.path.join(path, "awa2/Animals_with_Attributes2/classes.txt")) as f:
        labels = [line.strip() for line in f]
    return attributes, labels
def _get_cub200_attributes(path=None):
    """Load the L2-normalized continuous class attributes of CUB-200-2011."""
    if path is None:
        path = "/data/douillard/"
    attributes = np.loadtxt(
        os.path.join(path, "CUB_200_2011/attributes/class_attribute_labels_continuous.txt")
    )
    attributes /= np.linalg.norm(attributes, axis=1, keepdims=True)
    with open(os.path.join(path, "CUB_200_2011/classes.txt")) as f:
        labels = [line.strip() for line in f]
    return attributes, labels
def _get_cub200_attributes_mat(path=None):
    """Load CUB-200 attributes from the xlsa17 zero-shot split .mat file."""
    if path is None:
        path = "/data/douillard/"
    mat = loadmat(os.path.join(path, "zeroshot_split/xlsa17/data/CUB/att_splits.mat"))
    attributes = mat["att"].T
    with open(os.path.join(path, "CUB_200_2011/classes.txt")) as f:
        labels = [line.strip() for line in f]
    return attributes, labels
def _get_apy_attributes_mat(path=None):
    """Load aPY attributes from the xlsa17 zero-shot split .mat file.

    No label names are available for this dataset, hence the None.
    """
    if path is None:
        path = "/data/douillard/"
    mat = loadmat(os.path.join(path, "zeroshot_split/xlsa17/data/APY/att_splits.mat"))
    return mat["att"].T, None
def _get_lad_attributes(path=None):
    """Parse the LAD per-class attribute file into (attributes, labels)."""
    if path is None:
        path = "/data/douillard/"
    attributes, labels = [], []
    with open(os.path.join(path, "LAD/attributes_per_class.txt")) as f:
        for raw_line in f:
            fields = raw_line.strip().split(", ")
            # Assumes the attribute vector is wrapped by 3-character brackets
            # on each side (e.g. "[[ v1 v2 ... ]]") — confirm against the file.
            values = [float(tok) for tok in fields[1][3:-3].split(" ") if tok]
            attributes.append(np.array(values))
            labels.append(fields[0])
    return np.stack(attributes), np.array(labels)
class Scaler:
    """Min-max scaler mapping each feature column into ``[a, b]``.

    Mirrors sklearn's MinMaxScaler API but operates in-place on torch tensors.
    """
    def __init__(self, feature_range):
        self.feature_range = feature_range
    def fit(self, tensor):
        """Learn per-column scale and offset from ``tensor`` of shape (n, d)."""
        lo, _ = torch.min(tensor, dim=0)
        hi, _ = torch.max(tensor, dim=0)
        spread = hi - lo
        # Constant columns would divide by zero; treat them as unit spread.
        spread[spread == 0.] = 1.
        a, b = self.feature_range[0], self.feature_range[1]
        self.scale_ = (b - a) / spread
        self.min_ = a - lo * self.scale_
        self.data_min_ = lo
        self.data_max_ = hi
        self.data_range_ = spread
        return self
    def transform(self, tensor):
        """Scale ``tensor`` in-place and return it."""
        return tensor.mul_(self.scale_).add_(self.min_)
    def inverse_transform(self, tensor):
        """Undo :meth:`transform` in-place and return the tensor."""
        return tensor.sub_(self.min_).div_(self.scale_)
    def fit_transform(self, tensor):
        """Fit on ``tensor``, then transform it in-place."""
        self.fit(tensor)
        return self.transform(tensor)
| 9,761 | 32.662069 | 110 | py |
AFC | AFC-master/inclearn/lib/network/basenet.py | import copy
import logging
import torch
from torch import nn
from inclearn.lib import factory
from .classifiers import (Classifier, CosineClassifier, DomainClassifier, MCCosineClassifier)
from .postprocessors import FactorScalar, HeatedUpScalar, InvertedFactorScalar
from .word import Word2vec
logger = logging.getLogger(__name__)
class BasicNet(nn.Module):
    """Backbone convnet + classifier wrapper used by the incremental models.

    Optionally carries a post-processing scalar on the logits, a rotation
    prediction head, word embeddings, Grad-CAM hooks on the last conv, and
    an adversarial domain classifier.

    NOTE(review): the ``*_kwargs={}`` mutable default arguments are shared
    across calls — safe only as long as they are never mutated.
    """
    def __init__(
        self,
        convnet_type,
        convnet_kwargs={},
        classifier_kwargs={},
        postprocessor_kwargs={},
        wordembeddings_kwargs={},
        init="kaiming",
        device=None,
        return_features=False,
        extract_no_act=False,
        classifier_no_act=False,
        attention_hook=False,
        rotations_predictor=False,
        gradcam_hook=False
    ):
        super(BasicNet, self).__init__()
        # Optional logit post-processing (learned or scheduled scaling).
        if postprocessor_kwargs.get("type") == "learned_scaling":
            self.post_processor = FactorScalar(**postprocessor_kwargs)
        elif postprocessor_kwargs.get("type") == "inverted_learned_scaling":
            self.post_processor = InvertedFactorScalar(**postprocessor_kwargs)
        elif postprocessor_kwargs.get("type") == "heatedup":
            self.post_processor = HeatedUpScalar(**postprocessor_kwargs)
        elif postprocessor_kwargs.get("type") is None:
            self.post_processor = None
        else:
            raise NotImplementedError(
                "Unknown postprocessor {}.".format(postprocessor_kwargs["type"])
            )
        logger.info("Post processor is: {}".format(self.post_processor))
        self.convnet = factory.get_convnet(convnet_type, **convnet_kwargs)
        if "type" not in classifier_kwargs:
            raise ValueError("Specify a classifier!", classifier_kwargs)
        if classifier_kwargs["type"] == "fc":
            self.classifier = Classifier(self.convnet.out_dim, device=device, **classifier_kwargs)
        elif classifier_kwargs["type"] == "cosine":
            self.classifier = CosineClassifier(
                self.convnet.out_dim, device=device, **classifier_kwargs
            )
        elif classifier_kwargs["type"] == "mcdropout_cosine":
            self.classifier = MCCosineClassifier(
                self.convnet.out_dim, device=device, **classifier_kwargs
            )
        else:
            raise ValueError("Unknown classifier type {}.".format(classifier_kwargs["type"]))
        if rotations_predictor:
            print("Using a rotations predictor.")
            # 4-way head for the 0/90/180/270 degrees self-supervised task.
            self.rotations_predictor = nn.Linear(self.convnet.out_dim, 4)
        else:
            self.rotations_predictor = None
        if wordembeddings_kwargs:
            self.word_embeddings = Word2vec(**wordembeddings_kwargs, device=device)
        else:
            self.word_embeddings = None
        self.return_features = return_features
        self.extract_no_act = extract_no_act
        self.classifier_no_act = classifier_no_act
        self.attention_hook = attention_hook
        self.gradcam_hook = gradcam_hook
        self.device = device
        self.domain_classifier = None
        if self.gradcam_hook:
            self._hooks = [None, None]
            logger.info("Setting gradcam hook for gradients + activations of last conv.")
            self.set_gradcam_hook()
        if self.extract_no_act:
            logger.info("Features will be extracted without the last ReLU.")
        if self.classifier_no_act:
            logger.info("No ReLU will be applied on features before feeding the classifier.")
        self.to(self.device)
    def on_task_end(self):
        # Propagate the task-boundary notification to the relevant sub-modules.
        if isinstance(self.classifier, nn.Module):
            self.classifier.on_task_end()
        if isinstance(self.post_processor, nn.Module):
            self.post_processor.on_task_end()
    def on_epoch_end(self):
        if isinstance(self.classifier, nn.Module):
            self.classifier.on_epoch_end()
        if isinstance(self.post_processor, nn.Module):
            self.post_processor.on_epoch_end()
    def forward(
        self, x, rotation=False, index=None, features_processing=None, additional_features=None
    ):
        """Run the backbone, then either the rotation head or the classifier.

        Returns the backbone output dict augmented with classifier outputs
        (or "rotations"), plus Grad-CAM buffers when enabled.
        """
        if hasattr(self,
                   "word_embeddings") and self.word_embeddings is not None and isinstance(x, list):
            # With word embeddings, x is a [images, word_indexes] pair.
            words = x[1]
            x = x[0]
        else:
            words = None
        outputs = self.convnet(x)
        if words is not None:  # ugly to change
            outputs["word_embeddings"] = self.word_embeddings(words)
        # Either the pre-activation ("raw") features or the post-ReLU ones.
        if hasattr(self, "classifier_no_act") and self.classifier_no_act:
            selected_features = outputs["raw_features"]
        else:
            selected_features = outputs["features"]
        if features_processing is not None:
            selected_features = features_processing.fit_transform(selected_features)
        if rotation:
            # Self-supervised branch: predict rotations instead of class labels.
            outputs["rotations"] = self.rotations_predictor(outputs["features"])
            nb_inputs = len(x) // 4
            #for k in outputs.keys():
            #    if k != "rotations":
            #        if isinstance(outputs[k], list):
            #            outputs[k] = [elt[:32] for elt in outputs[k]]
            #        else:
            #            outputs[k] = outputs[k][:32]
        else:
            if additional_features is not None:
                clf_outputs = self.classifier(
                    torch.cat((selected_features, additional_features), 0)
                )
            else:
                clf_outputs = self.classifier(selected_features)
            outputs.update(clf_outputs)
        if hasattr(self, "gradcam_hook") and self.gradcam_hook:
            outputs["gradcam_gradients"] = self._gradcam_gradients
            outputs["gradcam_activations"] = self._gradcam_activations
        return outputs
    def post_process(self, x):
        # Identity when no post-processor was configured.
        if self.post_processor is None:
            return x
        return self.post_processor(x)
    @property
    def features_dim(self):
        return self.convnet.out_dim
    def add_classes(self, n_classes):
        self.classifier.add_classes(n_classes)
    def add_imprinted_classes(self, class_indexes, inc_dataset, **kwargs):
        # Only supported by classifiers implementing weight imprinting.
        if hasattr(self.classifier, "add_imprinted_classes"):
            self.classifier.add_imprinted_classes(class_indexes, inc_dataset, self, **kwargs)
    def add_custom_weights(self, weights, **kwargs):
        self.classifier.add_custom_weights(weights, **kwargs)
    def extract(self, x):
        """Return backbone features, pre- or post-ReLU per extract_no_act."""
        outputs = self.convnet(x)
        if self.extract_no_act:
            return outputs["raw_features"]
        return outputs["features"]
    def predict_rotations(self, inputs):
        if self.rotations_predictor is None:
            raise ValueError("Enable the rotations predictor.")
        return self.rotations_predictor(self.convnet(inputs)["features"])
    def freeze(self, trainable=False, model="all"):
        """(Un)freeze a sub-network and set its train/eval mode accordingly."""
        if model == "all":
            model = self
        elif model == "convnet":
            model = self.convnet
        elif model == "classifier":
            model = self.classifier
        else:
            assert False, model
        if not isinstance(model, nn.Module):
            return self
        for param in model.parameters():
            param.requires_grad = trainable
        # The Grad-CAM hook needs gradients on the last conv even when frozen.
        if hasattr(self, "gradcam_hook") and self.gradcam_hook and model == "convnet":
            for param in self.convnet.last_conv.parameters():
                param.requires_grad = True
        if not trainable:
            model.eval()
        else:
            model.train()
        return self
    def get_group_parameters(self):
        """Named parameter groups for per-group optimizer settings."""
        groups = {"convnet": self.convnet.parameters()}
        if isinstance(self.post_processor, FactorScalar):
            groups["postprocessing"] = self.post_processor.parameters()
        if hasattr(self.classifier, "new_weights"):
            groups["new_weights"] = self.classifier.new_weights
        if hasattr(self.classifier, "old_weights"):
            groups["old_weights"] = self.classifier.old_weights
        if self.rotations_predictor:
            groups["rotnet"] = self.rotations_predictor.parameters()
        if hasattr(self.convnet, "last_block"):
            groups["last_block"] = self.convnet.last_block.parameters()
        if hasattr(self.classifier, "_negative_weights"
                  ) and isinstance(self.classifier._negative_weights, nn.Parameter):
            groups["neg_weights"] = self.classifier._negative_weights
        if self.domain_classifier is not None:
            groups["domain_clf"] = self.domain_classifier.parameters()
        return groups
    def copy(self):
        # Deep copy, used e.g. to snapshot the old model for distillation.
        return copy.deepcopy(self)
    @property
    def n_classes(self):
        return self.classifier.n_classes
    def unset_gradcam_hook(self):
        self._hooks[0].remove()
        self._hooks[1].remove()
        self._hooks[0] = None
        self._hooks[1] = None
        self._gradcam_gradients, self._gradcam_activations = [None], [None]
    def set_gradcam_hook(self):
        """Register hooks storing the last conv's activations and gradients."""
        self._gradcam_gradients, self._gradcam_activations = [None], [None]
        def backward_hook(module, grad_input, grad_output):
            self._gradcam_gradients[0] = grad_output[0]
            return None
        def forward_hook(module, input, output):
            self._gradcam_activations[0] = output
            return None
        self._hooks[0] = self.convnet.last_conv.register_backward_hook(backward_hook)
        self._hooks[1] = self.convnet.last_conv.register_forward_hook(forward_hook)
    def create_domain_classifier(self):
        self.domain_classifier = DomainClassifier(self.convnet.out_dim, device=self.device)
        return self.domain_classifier
    def del_domain_classifier(self):
        self.domain_classifier = None
| 9,766 | 35.580524 | 99 | py |
AFC | AFC-master/inclearn/lib/data/datasets.py | import collections
import glob
import logging
import math
import os
import warnings
import numpy as np
from torchvision import datasets, transforms
logger = logging.getLogger(__name__)
class DataHandler:
    """Base descriptor of an incremental dataset: which torchvision dataset
    to load and which transforms / class ordering to use."""
    base_dataset = None  # torchvision dataset class or factory, set by subclasses
    train_transforms = []
    test_transforms = []
    common_transforms = [transforms.ToTensor()]
    class_order = None  # optional fixed ordering of the class indexes
    open_image = False  # True when samples are file paths to open lazily
    def set_custom_transforms(self, transforms):
        # Subclasses supporting transform tweaking override this method.
        if transforms:
            raise NotImplementedError("Not implemented for modified transforms.")
class iCIFAR10(DataHandler):
    """CIFAR-10 with the standard crop/flip/color-jitter augmentation."""
    base_dataset = datasets.cifar.CIFAR10
    train_transforms = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=63 / 255)
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ]
    def set_custom_transforms(self, transforms):
        # NOTE(review): the pop mutates the *class-level* list, so disabling
        # color jitter here also affects other instances and subclasses.
        if not transforms.get("color_jitter"):
            logger.info("Not using color jitter.")
            self.train_transforms.pop(-1)
class iCIFAR100(iCIFAR10):
    """CIFAR-100 variant; inherits iCIFAR10's train transforms but uses its
    own normalization statistics and the iCaRL class ordering."""
    base_dataset = datasets.cifar.CIFAR100
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
    class_order = [  # Taken from original iCaRL implementation:
        87, 0, 52, 58, 44, 91, 68, 97, 51, 15, 94, 92, 10, 72, 49, 78, 61, 14, 8, 86, 84, 96, 18,
        24, 32, 45, 88, 11, 4, 67, 69, 66, 77, 47, 79, 93, 29, 50, 57, 83, 17, 81, 41, 12, 37, 59,
        25, 20, 80, 73, 1, 28, 6, 46, 62, 82, 53, 9, 31, 75, 38, 63, 33, 74, 27, 22, 36, 3, 16, 21,
        60, 19, 70, 90, 89, 43, 5, 42, 65, 76, 40, 30, 23, 85, 2, 95, 56, 48, 71, 64, 98, 13, 99, 7,
        34, 55, 54, 26, 35, 39
    ]
class iMNIST(DataHandler):
    """MNIST with light crop/flip augmentation."""
    base_dataset = datasets.MNIST
    train_transforms = [transforms.RandomCrop(28, padding=4), transforms.RandomHorizontalFlip()]
    common_transforms = [transforms.ToTensor()]
class iPermutedMNIST(iMNIST):
    """MNIST variant where every image's pixels are shuffled by a single
    random permutation drawn when the data is first preprocessed."""
    def _preprocess_initial_data(self, data):
        batch, width, height, channels = data.shape
        flat = data.reshape(batch, width * height, channels)
        # One permutation shared by all images of the dataset.
        permutation = np.random.permutation(width * height)
        shuffled = flat[:, permutation, :]
        return shuffled.reshape(batch, width, height, channels)
class ImageNet100(DataHandler):
    """100-class ImageNet subset described by metadata split files."""
    train_transforms = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=63 / 255)
    ]
    test_transforms = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ]
    imagenet_size = 100
    open_image = True  # samples are file paths, opened lazily
    suffix = ""  # appended to the metadata file name (see subclasses)
    metadata_path = None  # overrides data_path for the metadata files if set
    def set_custom_transforms(self, transforms):
        # NOTE: pops from the class-level list, affecting subclasses too.
        if not transforms.get("color_jitter"):
            logger.info("Not using color jitter.")
            self.train_transforms.pop(-1)
    def base_dataset(self, data_path, train=True, download=False):
        """Populate ``self.data``/``self.targets`` from the split metadata.

        Each metadata line is "<relative/image/path> <class_id>".
        """
        if download:
            warnings.warn(
                "ImageNet incremental dataset cannot download itself,"
                " please see the instructions in the README."
            )
        split = "train" if train else "val"
        # Consistency fix: use the module logger instead of a bare print.
        logger.info("Loading metadata of ImageNet_{} ({} split).".format(self.imagenet_size, split))
        metadata_path = os.path.join(
            data_path if self.metadata_path is None else self.metadata_path,
            "{}_{}{}.txt".format(split, self.imagenet_size, self.suffix)
        )
        self.data, self.targets = [], []
        with open(metadata_path) as f:
            for line in f:
                path, target = line.strip().split(" ")
                self.data.append(os.path.join(data_path, path))
                self.targets.append(int(target))
        self.data = np.array(self.data)
        return self
class ImageNet100UCIR(ImageNet100):
    # Same as ImageNet100 but reads the "*_ucir" metadata files
    # (UCIR's class subset/split).
    suffix = "_ucir"
class ImageNet1000(ImageNet100):
    # Full ImageNet: metadata files become "train_1000.txt" / "val_1000.txt".
    imagenet_size = 1000
class TinyImageNet200(DataHandler):
    """Tiny-ImageNet handler (200 classes, 64x64 images, folder-per-class layout)."""
    train_transforms = [
        transforms.RandomCrop(64),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=63 / 255)
    ]
    test_transforms = [transforms.Resize(64)]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ]

    open_image = True
    class_order = list(range(200))

    def set_custom_transforms(self, transforms_dict):
        """Adjust train transforms: drop ColorJitter and/or pad the RandomCrop."""
        # Bug fix: work on an instance-level copy so the shared class-level
        # list is not mutated across instances (the previous in-place
        # pop/assignment leaked into every other handler instance).
        self.train_transforms = list(self.train_transforms)

        if not transforms_dict.get("color_jitter"):
            logger.info("Not using color jitter.")
            self.train_transforms.pop(-1)
        if transforms_dict.get("crop"):
            logger.info("Crop with padding of {}".format(transforms_dict.get("crop")))
            self.train_transforms[0] = transforms.RandomCrop(
                64, padding=transforms_dict.get("crop")
            )

    def base_dataset(self, data_path, train=True, download=False):
        """Populate ``data``/``targets`` from the train or val folder layout."""
        if train:
            self._train_dataset(data_path)
        else:
            self._val_dataset(data_path)

        return self

    def _train_dataset(self, data_path):
        """Scan train/<class>/images/*.JPEG; class ids follow sorted folder names."""
        self.data, self.targets = [], []

        train_dir = os.path.join(data_path, "train")
        # Bug fix: sort the listing — os.listdir order is arbitrary, which
        # made class-id assignment filesystem-dependent. Sorting matches the
        # convention used by CUB200 and keeps ids deterministic.
        for class_id, class_name in enumerate(sorted(os.listdir(train_dir))):
            paths = glob.glob(os.path.join(train_dir, class_name, "images", "*.JPEG"))
            targets = [class_id for _ in range(len(paths))]

            self.data.extend(paths)
            self.targets.extend(targets)

        self.data = np.array(self.data)

    def _val_dataset(self, data_path):
        """Read val_annotations.txt, mapping labels to the train-folder class ids."""
        self.data, self.targets = [], []

        # Same sorted listing as _train_dataset so both splits agree on ids.
        self.classes2id = {
            class_name: class_id
            for class_id, class_name in enumerate(sorted(os.listdir(os.path.join(data_path, "train"))))
        }
        self.id2classes = {v: k for k, v in self.classes2id.items()}

        with open(os.path.join(data_path, "val", "val_annotations.txt")) as f:
            for line in f:
                split_line = line.split("\t")
                path, class_label = split_line[0], split_line[1]

                class_id = self.classes2id[class_label]

                self.data.append(os.path.join(data_path, "val", "images", path))
                self.targets.append(class_id)

        self.data = np.array(self.data)
class AwA2(DataHandler):
    """Animals-with-Attributes-2 handler; train/test are split per class with a fixed seed."""
    test_split = 0.2

    train_transforms = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
    ]
    test_transforms = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.466, 0.459, 0.397], std=[0.211, 0.206, 0.203])
    ]

    open_image = True
    class_order = None

    def _create_class_mapping(self, path):
        """Build label<->id maps and a class order (train classes first, then test classes)."""
        label_to_id = {}
        with open(os.path.join(path, "classes.txt"), "r") as f:
            for i, line in enumerate(f.readlines()):
                label_to_id[line.strip().split("\t")[1]] = i

        self.class_order = []
        with open(os.path.join(path, "trainclasses.txt"), "r") as f:
            for i, line in enumerate(f.readlines()):
                self.class_order.append(label_to_id[line.strip()])
        with open(os.path.join(path, "testclasses.txt"), "r") as f:
            for j, line in enumerate(f.readlines(), start=len(self.class_order)):
                self.class_order.append(label_to_id[line.strip()])

        assert len(set(self.class_order)) == len(self.class_order)

        id_to_label = {v: k for k, v in label_to_id.items()}
        return label_to_id, id_to_label

    def set_custom_transforms(self, transforms_dict):
        pass

    def base_dataset(self, data_path, train=True, download=False):
        """Collect image paths per class and take a deterministic train/test split.

        :return: self, with ``data`` and ``targets`` populated.
        """
        directory = os.path.join(data_path, "awa2", "Animals_with_Attributes2")
        if not os.path.exists(directory):
            # The dataset cannot download itself; create the folder so the
            # user sees where the data is expected.
            os.makedirs(directory, exist_ok=True)

        label_to_id, id_to_label = self._create_class_mapping(directory)

        data = collections.defaultdict(list)
        for class_directory in os.listdir(os.path.join(directory, "JPEGImages")):
            class_id = label_to_id[class_directory]
            for image_path in glob.iglob(
                os.path.join(directory, "JPEGImages", class_directory, "*jpg")
            ):
                data[class_id].append(image_path)

        paths = []
        targets = []
        for class_id, class_paths in data.items():
            rnd_state = np.random.RandomState(seed=1)
            indexes = rnd_state.permutation(len(class_paths))

            # Bug fix (train/test leakage): both branches permute with the
            # same seed, but the old test branch took perm[ceil(0.2n):] while
            # train took perm[:floor(0.8n)] — a ~60% overlap. The test set is
            # now the exact complement of the train set.
            subset = math.floor(len(indexes) * (1 - self.test_split))
            if train:
                indexes = indexes[:subset]
            else:
                indexes = indexes[subset:]

            paths.append(np.array(class_paths)[indexes])
            targets.extend([class_id for _ in range(len(indexes))])

        self.data = np.concatenate(paths)
        self.targets = np.array(targets)

        self.label_to_id, self.id_to_label = label_to_id, id_to_label

        return self
class CUB200(DataHandler):
    """Caltech-UCSD Birds-200-2011 handler; the train/test split comes from
    the dataset's own train_test_split.txt file."""
    test_split = 0.2

    train_transforms = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
    ]
    test_transforms = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.4836, 0.4921, 0.4243], std=[0.1845, 0.1835, 0.1947])
    ]

    open_image = True

    # from The Good, the bad and the ugly:
    class_order = [
        1, 2, 14, 15, 19, 21, 46, 47, 66, 67, 68, 72, 73, 74, 75, 88, 89, 99,
        148, 149, 0, 13, 33, 34, 100, 119, 109, 84, 7, 53, 170, 40, 55, 108,
        186, 174, 29, 194, 50, 106, 116, 134, 133, 45, 146, 36, 159, 125, 136,
        124, 26, 188, 196, 185, 157, 63, 43, 6, 182, 141, 85, 158, 80, 127,
        10, 144, 28, 165, 58, 94, 154, 9, 140, 101, 78, 105, 191, 4, 82, 177,
        161, 193, 195, 49, 38, 104, 35, 31, 145, 81, 59, 143, 198, 92, 197,
        65, 98, 52, 150, 17, 151, 115, 60, 24, 23, 77, 16, 175, 57, 20, 192,
        56, 39, 152, 87, 12, 117, 120, 178, 61, 153, 91, 37, 139, 181, 95, 171,
        70, 41, 184, 176, 18, 64, 8, 111, 62, 5, 79, 180, 107, 121, 114, 183,
        166, 128, 132, 113, 169, 130, 173,  # seen classes
        42, 110, 22, 97, 54, 129, 138, 122, 155, 123, 199, 71, 172, 27, 118,
        164, 102, 179, 76, 11, 44, 189, 190, 137, 156, 51, 32, 163, 30, 142,
        93, 69, 96, 90, 103, 126, 160, 48, 168, 147, 112, 86, 162, 135, 187,
        83, 25, 3, 131, 167  # unseen classes
    ]  # yapf: disable

    def _create_class_mapping(self, path):
        """Read classes.txt ("<id> <name>" lines) into label<->id maps.

        Also sets self.class_order to the file order (0..n-1).
        """
        label_to_id = {}
        self.class_order = []
        with open(os.path.join(path, "classes.txt"), "r") as f:
            for i, line in enumerate(f.readlines()):
                label_to_id[line.strip().split(" ")[1]] = i
                self.class_order.append(i)
        id_to_label = {v: k for k, v in label_to_id.items()}
        return label_to_id, id_to_label

    def set_custom_transforms(self, transforms_dict):
        # No custom transforms are supported for this dataset.
        pass

    def base_dataset(self, data_path, train=True, download=False):
        """Populate data/targets using the official train/test split file.

        :param train: Keep images flagged 1 in train_test_split.txt if True,
                      otherwise the remaining (test) images.
        :return: self
        """
        directory = os.path.join(data_path, "CUB_200_2011")
        if not os.path.exists(directory):
            # The dataset cannot download itself; only create the folder.
            os.makedirs(directory, exist_ok=True)
            pass

        label_to_id, id_to_label = self._create_class_mapping(directory)

        # Ids (1-based line numbers) of images belonging to the train split.
        train_set = set()
        with open(os.path.join(directory, "train_test_split.txt")) as f:
            for line in f:
                line_id, set_id = line.split(" ")
                if int(set_id) == 1:
                    train_set.add(int(line_id))

        # NOTE(review): `c` counts images in sorted-folder/sorted-file order
        # and is matched against the 1-based image ids of the split file —
        # this assumes the sorted traversal reproduces the official image-id
        # ordering; confirm against images.txt.
        c = 1
        data = collections.defaultdict(list)
        for class_directory in sorted(os.listdir(os.path.join(directory, "images"))):
            class_id = label_to_id[class_directory]
            for image_path in sorted(
                os.listdir(os.path.join(directory, "images", class_directory))
            ):
                if not image_path.endswith("jpg"):
                    continue

                image_path = os.path.join(directory, "images", class_directory, image_path)
                if (c in train_set and train) or (c not in train_set and not train):
                    data[class_id].append(image_path)

                c += 1

        self.data, self.targets = self._convert(data)
        self.label_to_id, self.id_to_label = label_to_id, id_to_label

        return self

    @staticmethod
    def _convert(data):
        """Flatten a {class_id: [paths]} dict into parallel (paths, targets) arrays."""
        paths = []
        targets = []

        for class_id, class_paths in data.items():
            paths.extend(class_paths)
            targets.extend([class_id for _ in range(len(class_paths))])

        return np.array(paths), np.array(targets)
class APY(DataHandler):
    """aPascal/aYahoo handler; per-class deterministic train/test split with a capped test size."""
    test_split = 0.1
    test_max_cap = 100

    train_transforms = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
    ]
    test_transforms = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.3074, 0.2576, 0.2052], std=[0.2272, 0.2147, 0.2105])
    ]

    open_image = True

    # from The Good, the bad and the ugly:
    class_order = [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
        12, 13, 14, 15, 16, 17, 18, 19,  # seen classes from pascal VOC
        20, 21, 22, 23, 24, 25, 26, 27,
        28, 29, 30, 31  # unseen classes from yahoo
    ]  # yapf: disable

    def _create_class_mapping(self, path):
        """Read class_names.txt (one name per line) into label<->id maps."""
        label_to_id = {}
        with open(os.path.join(path, "class_names.txt"), "r") as f:
            for i, line in enumerate(f.readlines()):
                label_to_id[line.strip()] = i
        id_to_label = {v: k for k, v in label_to_id.items()}
        return label_to_id, id_to_label

    def set_custom_transforms(self, transforms_dict):
        # No custom transforms are supported for this dataset.
        pass

    def base_dataset(self, data_path, train=True, download=False):
        """Read "path,label" lines from data.txt and split per class.

        :return: self, with ``data``/``targets`` populated.
        """
        directory = os.path.join(data_path, "APY")
        label_to_id, id_to_label = self._create_class_mapping(directory)

        paths, targets = [], []
        with open(os.path.join(directory, "data.txt")) as f:
            for line in f:
                p, t = line.split(",")

                paths.append(os.path.join(data_path, p))
                targets.append(label_to_id[t.strip()])

        paths = np.array(paths)
        targets = np.array(targets)

        self.data, self.targets = [], []
        for class_id in np.unique(targets):
            rnd_state = np.random.RandomState(seed=1)

            indexes = np.where(class_id == targets)[0]

            test_amount = int(len(indexes) * self.test_split)
            test_amount = min(test_amount, self.test_max_cap)

            # Bug fix (train/test leakage): the previous code drew train and
            # test with two `choice(..., replace=False)` calls on identically
            # seeded RNGs; since choice takes a prefix of the same
            # permutation, the test set was contained in the train set. Use
            # one permutation sliced disjointly — the test subset (the
            # permutation prefix) is identical to before.
            shuffled = rnd_state.permutation(indexes)
            if train:
                indexes = shuffled[test_amount:]
            else:
                indexes = shuffled[:test_amount]

            self.data.append(paths[indexes])
            self.targets.append(targets[indexes])

        self.data = np.concatenate(self.data)
        self.targets = np.concatenate(self.targets)

        self.label_to_id, self.id_to_label = label_to_id, id_to_label
        print(f"{len(self.data)} images for {len(self.label_to_id)} classes.")
        return self
class LAD(DataHandler):
    """Large-scale Attribute Dataset handler; deterministic per-class train/test split."""
    test_split = 0.1

    train_transforms = [
        transforms.RandomResizedCrop((224, 224)),
        transforms.RandomHorizontalFlip(),
    ]
    test_transforms = [transforms.Resize((224, 224))]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5815, 0.5567, 0.5078], std=[0.2364, 0.2393, 0.2487])
    ]

    open_image = True

    def _create_class_mapping(self, path):
        """Read label_list.txt ("<code>, <name>, ..." lines) into label<->id maps."""
        label_to_id = {}
        self.class_order = []
        with open(os.path.join(path, "label_list.txt")) as f:
            for i, line in enumerate(f):
                c = line.strip().split(", ")[1]
                label_to_id[c] = i
                self.class_order.append(i)  # Classes are already in the right order.
        id_to_label = {v: k for k, v in label_to_id.items()}
        return label_to_id, id_to_label

    def set_custom_transforms(self, transforms_dict):
        # No custom transforms are supported for this dataset.
        pass

    def base_dataset(self, data_path, train=True, download=False):
        """Collect images per class folder and split train/test deterministically.

        :return: self, with ``data``/``targets`` populated.
        """
        directory = os.path.join(data_path, "LAD")
        label_to_id, id_to_label = self._create_class_mapping(directory)

        paths = []
        targets = []

        base_path = os.path.join(directory, "images/")
        for class_folder in os.listdir(base_path):
            # Folder names look like "<code>_<class_name>"; recover the name.
            class_name = "_".join(class_folder.split("_")[1:])
            class_id = label_to_id[class_name]

            class_folder = os.path.join(base_path, class_folder)
            for image_path in glob.iglob(os.path.join(class_folder, "*jpg")):
                paths.append(image_path)
                targets.append(class_id)

        paths = np.array(paths)
        targets = np.array(targets)

        self.data, self.targets = [], []
        for class_id in np.unique(targets):
            rnd_state = np.random.RandomState(seed=1)

            indexes = np.where(class_id == targets)[0]

            # Bug fix (train/test leakage): train and test were drawn with two
            # `choice(..., replace=False)` calls on identically seeded RNGs —
            # both take a prefix of the same permutation, so test ⊂ train.
            # Use one permutation sliced disjointly; the train subset is
            # identical to before and the test subset is its complement.
            shuffled = rnd_state.permutation(indexes)
            train_amount = int(len(indexes) * (1 - self.test_split))
            if train:
                indexes = shuffled[:train_amount]
            else:
                indexes = shuffled[train_amount:]

            self.data.append(paths[indexes])
            self.targets.append(targets[indexes])

        self.data = np.concatenate(self.data)
        self.targets = np.concatenate(self.targets)

        self.label_to_id, self.id_to_label = label_to_id, id_to_label
        print(f"{len(self.data)} images for {len(self.label_to_id)} classes.")
        return self
| 18,153 | 31.82821 | 100 | py |
AFC | AFC-master/inclearn/lib/data/incdataset.py | import logging
import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import transforms
from .datasets import (
APY, CUB200, LAD, AwA2, ImageNet100, ImageNet100UCIR, ImageNet1000, TinyImageNet200, iCIFAR10,
iCIFAR100
)
logger = logging.getLogger(__name__)
class IncrementalDataset:
    """Incremental generator of datasets.

    Splits one (or several concatenated) classification datasets into a
    sequence of tasks, each adding ``increment`` new classes, and builds the
    matching train/val/test DataLoaders.

    :param dataset_name: Among a list of available dataset, that can easily
                         be defined (see at file's end).
    :param random_order: Shuffle the class ordering, else use a cherry-picked
                         ordering.
    :param shuffle: Shuffle batch order between epochs.
    :param workers: Number of workers loading the data.
    :param batch_size: The batch size.
    :param seed: Seed to force determinist class ordering.
    :param increment: Number of class to add at each task.
    :param validation_split: Percent of training data to allocate for validation.
    :param onehot: Returns targets encoded as onehot vectors instead of scalars.
                   Memory is expected to be already given in an onehot format.
    :param initial_increment: Initial increment may be defined if you want to train
                              on more classes than usual for the first task, like
                              UCIR does.
    """

    def __init__(
        self,
        dataset_name,
        random_order=False,
        shuffle=True,
        workers=10,
        batch_size=128,
        seed=1,
        increment=10,
        validation_split=0.,
        onehot=False,
        initial_increment=None,
        sampler=None,
        sampler_config=None,
        data_path="data",
        class_order=None,
        dataset_transforms=None,
        all_test_classes=False,
        metadata_path=None
    ):
        datasets = _get_datasets(dataset_name)
        if metadata_path:
            print("Adding metadata path {}".format(metadata_path))
            datasets[0].metadata_path = metadata_path

        # Loads all splits into memory and computes the per-task increments.
        self._setup_data(
            datasets,
            random_order=random_order,
            class_order=class_order,
            seed=seed,
            increment=increment,
            validation_split=validation_split,
            initial_increment=initial_increment,
            data_path=data_path
        )

        dataset = datasets[0]()
        dataset.set_custom_transforms(dataset_transforms)
        self.train_transforms = dataset.train_transforms  # FIXME handle multiple datasets
        self.test_transforms = dataset.test_transforms
        self.common_transforms = dataset.common_transforms

        self.open_image = datasets[0].open_image

        self._current_task = 0

        self._seed = seed
        self._batch_size = batch_size
        self._workers = workers
        self._shuffle = shuffle
        self._onehot = onehot
        self._sampler = sampler
        self._sampler_config = sampler_config
        self._all_test_classes = all_test_classes

    @property
    def n_tasks(self):
        # Number of incremental tasks.
        return len(self.increments)

    @property
    def n_classes(self):
        # Total number of classes over all tasks.
        return sum(self.increments)

    def new_task(self, memory=None, memory_val=None):
        """Build the loaders for the next task and advance the task counter.

        :param memory: Optional (data, targets) rehearsal memory appended to
                       the train set.
        :param memory_val: Optional rehearsal memory for the validation set.
        :return: (task_info dict, train_loader, val_loader or None, test_loader).
        :raises Exception: When every task has already been consumed.
        """
        if self._current_task >= len(self.increments):
            raise Exception("No more tasks.")

        # Classes of the current task are [min_class, max_class).
        min_class = sum(self.increments[:self._current_task])
        max_class = sum(self.increments[:self._current_task + 1])

        x_train, y_train = self._select(
            self.data_train, self.targets_train, low_range=min_class, high_range=max_class
        )
        nb_new_classes = len(np.unique(y_train))
        x_val, y_val = self._select(
            self.data_val, self.targets_val, low_range=min_class, high_range=max_class
        )
        if self._all_test_classes is True:
            logger.info("Testing on all classes!")
            x_test, y_test = self._select(
                self.data_test, self.targets_test, high_range=sum(self.increments)
            )
        elif self._all_test_classes is not None or self._all_test_classes is not False:
            # NOTE(review): this `or` condition is always True (any value is
            # either "not None" or "not False"), so the plain else branch
            # below is unreachable; with the default all_test_classes=False
            # the arithmetic still reduces to [:current_task + 1]. Likely
            # `and` was intended — confirm before relying on non-bool values.
            max_class = sum(self.increments[:self._current_task + 1 + self._all_test_classes])
            logger.info(
                f"Testing on {self._all_test_classes} unseen tasks (max class = {max_class})."
            )
            x_test, y_test = self._select(self.data_test, self.targets_test, high_range=max_class)
        else:
            # Test on all classes seen so far.
            x_test, y_test = self._select(self.data_test, self.targets_test, high_range=max_class)

        if self._onehot:

            def to_onehot(x):
                n = np.max(x) + 1
                return np.eye(n)[x]

            y_train = to_onehot(y_train)

        if memory is not None:
            logger.info("Set memory of size: {}.".format(memory[0].shape[0]))
            x_train, y_train, train_memory_flags = self._add_memory(x_train, y_train, *memory)
        else:
            train_memory_flags = np.zeros((x_train.shape[0],))
        if memory_val is not None:
            logger.info("Set validation memory of size: {}.".format(memory_val[0].shape[0]))
            x_val, y_val, val_memory_flags = self._add_memory(x_val, y_val, *memory_val)
        else:
            val_memory_flags = np.zeros((x_val.shape[0],))

        train_loader = self._get_loader(x_train, y_train, train_memory_flags, mode="train")
        val_loader = self._get_loader(x_val, y_val, val_memory_flags,
                                      mode="train") if len(x_val) > 0 else None
        test_loader = self._get_loader(x_test, y_test, np.zeros((x_test.shape[0],)), mode="test")

        task_info = {
            "min_class": min_class,
            "max_class": max_class,
            "total_n_classes": sum(self.increments),
            "increment": nb_new_classes,  # self.increments[self._current_task],
            "task": self._current_task,
            "max_task": len(self.increments),
            "n_train_data": x_train.shape[0],
            "n_test_data": x_test.shape[0]
        }

        self._current_task += 1

        return task_info, train_loader, val_loader, test_loader

    def _add_memory(self, x, y, data_memory, targets_memory):
        """Append rehearsal memory to (x, y); returns flags marking memory rows."""
        if self._onehot:  # Need to add dummy zeros to match the number of targets:
            targets_memory = np.concatenate(
                (
                    targets_memory,
                    np.zeros((targets_memory.shape[0], self.increments[self._current_task]))
                ),
                axis=1
            )

        memory_flags = np.concatenate((np.zeros((x.shape[0],)), np.ones((data_memory.shape[0],))))

        x = np.concatenate((x, data_memory))
        y = np.concatenate((y, targets_memory))

        return x, y, memory_flags

    def get_custom_loader(
        self, class_indexes, memory=None, mode="test", data_source="train", sampler=None
    ):
        """Returns a custom loader.

        :param class_indexes: A list of class indexes that we want.
        :param mode: Various mode for the transformations applied on it.
        :param data_source: Whether to fetch from the train, val, or test set.
        :return: The raw data and a loader.
        """
        if not isinstance(class_indexes, list):  # TODO: deprecated, should always give a list
            class_indexes = [class_indexes]

        if data_source == "train":
            x, y = self.data_train, self.targets_train
        elif data_source == "val":
            x, y = self.data_val, self.targets_val
        elif data_source == "test":
            x, y = self.data_test, self.targets_test
        else:
            raise ValueError("Unknown data source <{}>.".format(data_source))

        data, targets = [], []
        for class_index in class_indexes:
            class_data, class_targets = self._select(
                x, y, low_range=class_index, high_range=class_index + 1
            )
            data.append(class_data)
            targets.append(class_targets)

        if len(data) == 0:
            # No class given: the loader must then come from memory alone.
            assert memory is not None
        else:
            data = np.concatenate(data)
            targets = np.concatenate(targets)

        if (not isinstance(memory, tuple) and
            memory is not None) or (isinstance(memory, tuple) and memory[0] is not None):
            if len(data) > 0:
                data, targets, memory_flags = self._add_memory(data, targets, *memory)
            else:
                data, targets = memory
                memory_flags = np.ones((data.shape[0],))
        else:
            memory_flags = np.zeros((data.shape[0],))

        return data, self._get_loader(
            data, targets, memory_flags, shuffle=False, mode=mode, sampler=sampler
        )

    def get_memory_loader(self, data, targets):
        """Train-mode loader over memory samples only (all flags set to 1)."""
        return self._get_loader(
            data, targets, np.ones((data.shape[0],)), shuffle=True, mode="train"
        )

    def _select(self, x, y, low_range=0, high_range=0):
        # Keep only samples whose target lies in [low_range, high_range).
        idxes = np.where(np.logical_and(y >= low_range, y < high_range))[0]
        return x[idxes], y[idxes]

    def _get_loader(self, x, y, memory_flags, shuffle=True, mode="train", sampler=None):
        """Wrap arrays into a DataLoader with mode-dependent transforms.

        ``mode="flip"`` forces a horizontal flip (used e.g. for test-time
        augmentation).
        """
        if mode == "train":
            trsf = transforms.Compose([*self.train_transforms, *self.common_transforms])
        elif mode == "test":
            trsf = transforms.Compose([*self.test_transforms, *self.common_transforms])
        elif mode == "flip":
            trsf = transforms.Compose(
                [
                    transforms.RandomHorizontalFlip(p=1.), *self.test_transforms,
                    *self.common_transforms
                ]
            )
        else:
            raise NotImplementedError("Unknown mode {}.".format(mode))

        sampler = sampler or self._sampler
        if sampler is not None and mode == "train":
            logger.info("Using sampler {}".format(sampler))
            sampler = sampler(y, memory_flags, batch_size=self._batch_size, **self._sampler_config)
            # batch_size must be 1 (the DataLoader default) when a
            # batch_sampler is supplied — the sampler yields whole batches.
            batch_size = 1
        else:
            sampler = None
            batch_size = self._batch_size

        return DataLoader(
            DummyDataset(x, y, memory_flags, trsf, open_image=self.open_image),
            batch_size=batch_size,
            shuffle=shuffle if sampler is None else False,
            num_workers=self._workers,
            batch_sampler=sampler
        )

    def _setup_data(
        self,
        datasets,
        random_order=False,
        class_order=None,
        seed=1,
        increment=10,
        validation_split=0.,
        initial_increment=None,
        data_path="data"
    ):
        """Load every dataset, remap class ids to the chosen order and compute increments."""
        # FIXME: handles online loading of images
        self.data_train, self.targets_train = [], []
        self.data_test, self.targets_test = [], []
        self.data_val, self.targets_val = [], []
        self.increments = []
        self.class_order = []

        current_class_idx = 0  # When using multiple datasets
        for dataset in datasets:
            train_dataset = dataset().base_dataset(data_path, train=True, download=True)
            test_dataset = dataset().base_dataset(data_path, train=False, download=True)

            x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
            x_val, y_val, x_train, y_train = self._split_per_class(
                x_train, y_train, validation_split
            )
            x_test, y_test = test_dataset.data, np.array(test_dataset.targets)

            # Class-order priority: random > explicit argument > handler
            # class attribute > instance attribute > identity.
            order = list(range(len(np.unique(y_train))))
            if random_order:
                random.seed(seed)  # Ensure that following order is determined by seed:
                random.shuffle(order)
            elif class_order:
                order = class_order
            elif dataset.class_order is not None:
                order = dataset.class_order
            elif train_dataset.class_order is not None:
                order = train_dataset.class_order

            logger.info("Dataset {}: class ordering: {}.".format(dataset.__name__, order))

            self.class_order.append(order)

            y_train = self._map_new_class_index(y_train, order)
            y_val = self._map_new_class_index(y_val, order)
            y_test = self._map_new_class_index(y_test, order)

            # Offset targets so multiple datasets do not collide.
            y_train += current_class_idx
            y_val += current_class_idx
            y_test += current_class_idx

            current_class_idx += len(order)
            if len(datasets) > 1:
                # With several datasets, each dataset is its own task.
                self.increments.append(len(order))
            elif initial_increment is None:
                nb_steps = len(order) / increment
                remainder = len(order) - int(nb_steps) * increment

                if not nb_steps.is_integer():
                    # NOTE(review): the warning message contains typos
                    # ("THe", "sligthly") — runtime string left untouched.
                    logger.warning(
                        f"THe last step will have sligthly less sample ({remainder} vs {increment})."
                    )
                    self.increments = [increment for _ in range(int(nb_steps))]
                    self.increments.append(remainder)
                else:
                    self.increments = [increment for _ in range(int(nb_steps))]
            else:
                # A bigger first task, as done e.g. by UCIR.
                self.increments = [initial_increment]

                nb_steps = (len(order) - initial_increment) / increment
                remainder = (len(order) - initial_increment) - int(nb_steps) * increment
                if not nb_steps.is_integer():
                    logger.warning(
                        f"THe last step will have sligthly less sample ({remainder} vs {increment})."
                    )
                    self.increments.extend([increment for _ in range(int(nb_steps))])
                    self.increments.append(remainder)
                else:
                    self.increments.extend([increment for _ in range(int(nb_steps))])

            self.data_train.append(x_train)
            self.targets_train.append(y_train)
            self.data_val.append(x_val)
            self.targets_val.append(y_val)
            self.data_test.append(x_test)
            self.targets_test.append(y_test)

        self.data_train = np.concatenate(self.data_train)
        self.targets_train = np.concatenate(self.targets_train)
        self.data_val = np.concatenate(self.data_val)
        self.targets_val = np.concatenate(self.targets_val)
        self.data_test = np.concatenate(self.data_test)
        self.targets_test = np.concatenate(self.targets_test)

    @staticmethod
    def _map_new_class_index(y, order):
        """Transforms targets for new class order."""
        return np.array(list(map(lambda x: order.index(x), y)))

    @staticmethod
    def _split_per_class(x, y, validation_split=0.):
        """Splits train data for a subset of validation data.

        Split is done so that each class has a much data.
        """
        shuffled_indexes = np.random.permutation(x.shape[0])
        x = x[shuffled_indexes]
        y = y[shuffled_indexes]

        x_val, y_val = [], []
        x_train, y_train = [], []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            nb_val_elts = int(class_indexes.shape[0] * validation_split)

            val_indexes = class_indexes[:nb_val_elts]
            train_indexes = class_indexes[nb_val_elts:]

            x_val.append(x[val_indexes])
            y_val.append(y[val_indexes])
            x_train.append(x[train_indexes])
            y_train.append(y[train_indexes])

        x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
        x_val, y_val = np.concatenate(x_val), np.concatenate(y_val)

        return x_val, y_val, x_train, y_train
class DummyDataset(torch.utils.data.Dataset):
    """Map-style dataset over parallel arrays of samples, targets and memory flags.

    When ``open_image`` is True, ``x`` holds file paths opened lazily with PIL;
    otherwise ``x`` holds raw uint8-convertible image arrays.
    """

    def __init__(self, x, y, memory_flags, trsf, open_image=False):
        assert x.shape[0] == y.shape[0] == memory_flags.shape[0]

        self.x = x
        self.y = y
        self.memory_flags = memory_flags
        self.trsf = trsf
        self.open_image = open_image

    def __len__(self):
        return self.x.shape[0]

    def __getitem__(self, idx):
        sample, target = self.x[idx], self.y[idx]
        flag = self.memory_flags[idx]

        if self.open_image:
            # Lazily load from disk and force 3 channels.
            img = Image.open(sample).convert("RGB")
        else:
            img = Image.fromarray(sample.astype("uint8"))

        return {"inputs": self.trsf(img), "targets": target, "memory_flags": flag}
def _get_datasets(dataset_names):
    """Resolve a dash-separated string of dataset names into handler classes."""
    names = dataset_names.split("-")
    return [_get_dataset(name) for name in names]
def _get_dataset(dataset_name):
    """Map a dataset name (case- and whitespace-insensitive) to its handler class.

    :raises NotImplementedError: For unrecognized names.
    """
    key = dataset_name.lower().strip()

    handlers = {
        "cifar10": iCIFAR10,
        "cifar100": iCIFAR100,
        "imagenet100": ImageNet100,
        "imagenet100ucir": ImageNet100UCIR,
        "imagenet1000": ImageNet1000,
        "tinyimagenet": TinyImageNet200,
        "awa2": AwA2,
        "cub200": CUB200,
        "apy": APY,
        "lad": LAD,
    }

    if key in handlers:
        return handlers[key]
    raise NotImplementedError("Unknown dataset {}.".format(dataset_name))
| 17,267 | 36.457701 | 101 | py |
AFC | AFC-master/inclearn/lib/data/samplers.py | import numpy as np
from torch.utils.data.sampler import BatchSampler
class MemoryOverSampler(BatchSampler):
    """Batch sampler oversampling rehearsal-memory samples.

    Memory (old-class) samples are repeated so that old classes appear with
    roughly the same frequency as new classes.

    :param y: Integer class targets, one per sample.
    :param memory_flags: 1. for memory (old) samples, 0. for new samples.
    :param batch_size: Number of indexes yielded per batch.
    """

    def __init__(self, y, memory_flags, batch_size=128, **kwargs):
        self.indexes = self._oversample(y, memory_flags)
        self.batch_size = batch_size

    def __len__(self):
        return len(self.indexes) // self.batch_size

    def __iter__(self):
        np.random.shuffle(self.indexes)

        for batch_index in range(len(self)):
            low_index = batch_index * self.batch_size
            high_index = (batch_index + 1) * self.batch_size

            yield self.indexes[low_index:high_index].tolist()

    def _oversample(self, y, memory_flags):
        """Return dataset indexes with each memory sample repeated ``factor`` times."""
        old_indexes = np.where(memory_flags == 1.)[0]
        new_indexes = np.where(memory_flags == 0.)[0]

        old, new = y[old_indexes], y[new_indexes]

        old_qt = self._mean_quantity(old)
        new_qt = self._mean_quantity(new)
        assert new_qt > old_qt, (new_qt, old_qt)
        # Integer repeat factor; np.repeat rejects float repeats.
        factor = int(new_qt / old_qt)

        indexes = [new_indexes]
        for class_id in np.unique(y):
            # Bug fix: repeat the *global* dataset positions of the memory
            # samples. The previous code repeated np.where(old == class_id)[0],
            # i.e. positions *within the memory subset*, which pointed at
            # arbitrary (mostly new-class) samples in the full dataset.
            indexes.append(np.repeat(old_indexes[old == class_id], factor))

        return np.concatenate(indexes)

    @staticmethod
    def _mean_quantity(y):
        # Average number of samples per class (absent classes count as 0).
        return np.mean(np.bincount(y))
class MultiSampler(BatchSampler):
    """Sample same batch several times. Every time it's a little bit different
    due to data augmentation. To be used with ensembling models.

    :param nb_samples: Total number of samples in the dataset.
    :param batch_size: Number of indexes per yielded batch.
    :param factor: How many times each batch should be repeated (stored for
                   callers; not used by this stub).
    """

    def __init__(self, nb_samples, batch_size, factor=1, **kwargs):
        self.nb_samples = nb_samples
        self.factor = factor
        self.batch_size = batch_size

    def __len__(self):
        # Bug fix: previously read the non-existent attribute ``self.y``
        # (AttributeError at runtime) and used true division, returning a
        # float. Use the stored sample count with floor division.
        return self.nb_samples // self.batch_size

    def __iter__(self):
        # NOTE(review): still unimplemented — __iter__ returns None, so
        # iterating raises TypeError. Kept as in the original stub.
        pass
class TripletCKSampler(BatchSampler):
    """Samples positive pairs that will then be mixed into triplets.

    C = number of classes
    K = number of instances per class

    References:
        * Facenet: A unified embedding for face recognition and clustering
          Schroff et al.
          CVPR 2015.
    """

    def __init__(self, y, nb_per_class=4, nb_classes=20):
        assert len(np.unique(y)) >= nb_classes

        self.y = y
        self.nb_per_class = nb_per_class
        self.nb_classes = nb_classes

        self._classes = np.unique(y)
        self._class_to_indexes = {
            class_idx: np.where(y == class_idx)[0] for class_idx in self._classes
        }

    def __len__(self):
        return len(self.y) // (self.nb_per_class * self.nb_classes)

    def __iter__(self):
        for _ in range(len(self)):
            # Pick C distinct classes, then K instances per class (with
            # replacement only when a class has fewer than K samples).
            picked_classes = np.random.choice(self._classes, size=self.nb_classes, replace=False)

            batch = []
            for class_id in picked_classes:
                pool = self._class_to_indexes[class_id]
                allow_duplicates = bool(len(pool) < self.nb_per_class)
                chosen = np.random.choice(pool, size=self.nb_per_class, replace=allow_duplicates)
                batch.extend(chosen.tolist())

            yield batch
class TripletSampler(BatchSampler):
    """Samples elements so that each batch is constituted by a third of anchor, a third
    of positive, and a third of negative.

    Reference:
        * Openface: A general-purpose face recognition library with mobile applications.
          Amos et al.
          2016
    """

    def __init__(self, y, batch_size=128):
        self.y = y
        # A third of the requested batch size per triplet role.
        self.batch_size = (batch_size // 3)

        print("Triplet Sampler has a batch size of {}.".format(3 * self.batch_size))

        self._classes = set(np.unique(y).tolist())
        self._class_to_indexes = {
            class_idx: np.where(y == class_idx)[0] for class_idx in self._classes
        }
        self._indexes = np.arange(len(y))

    def __len__(self):
        return len(self.y) // self.batch_size

    def __iter__(self):
        # Re-shuffle y and the index map together once per epoch.
        self._random_permut()

        for batch_index in range(len(self)):
            indexes = []

            for i in range(self.batch_size):
                # NOTE(review): `batch_index * i` repeats position 0 whenever
                # either factor is 0 and never walks the data linearly —
                # `batch_index * self.batch_size + i` was likely intended;
                # confirm before changing, as training runs used this as-is.
                anchor_index = self._indexes[batch_index * i]
                anchor_class = self.y[batch_index * i]

                # Draw a positive different from the anchor.
                # NOTE(review): loops forever if the anchor's class has a
                # single sample (choice can only return the anchor itself).
                pos_index = anchor_index
                while pos_index == anchor_index:
                    pos_index = np.random.choice(self._class_to_indexes[anchor_class])

                # Draw a negative from any other class.
                neg_class = np.random.choice(list(self._classes - set([anchor_class])))
                neg_index = np.random.choice(self._class_to_indexes[neg_class])

                indexes.append(anchor_index)
                indexes.append(pos_index)
                indexes.append(neg_index)

            yield indexes

    def _random_permut(self):
        # Apply one shared permutation to both y and the index map so that
        # y[k] stays the label of dataset sample _indexes[k].
        shuffled_indexes = np.random.permutation(len(self.y))
        self.y = self.y[shuffled_indexes]
        self._indexes = self._indexes[shuffled_indexes]
class NPairSampler(BatchSampler):
    """Batch sampler yielding ``n_samples`` instances for each of ``n_classes``
    classes per batch, drawing classes proportionally to their frequency.

    :param y: Integer class targets, one per sample.
    :param n_classes: Number of distinct classes per batch.
    :param n_samples: Number of instances per chosen class.
    """

    def __init__(self, y, n_classes=10, n_samples=2, **kwargs):
        self.y = y
        self.n_classes = n_classes
        self.n_samples = n_samples

        self._classes = np.sort(np.unique(y))
        # Empirical class frequencies, used as the class-sampling distribution.
        self._distribution = np.bincount(y) / np.bincount(y).sum()

        self._batch_size = self.n_samples * self.n_classes

        self._class_to_indexes = {
            class_index: np.where(y == class_index)[0] for class_index in self._classes
        }

        # Per-class cursor into the shuffled index pool; reshuffled and reset
        # whenever a class runs out of fresh samples.
        self._class_counter = {class_index: 0 for class_index in self._classes}

    def __iter__(self):
        for indexes in self._class_to_indexes.values():
            np.random.shuffle(indexes)

        count = 0
        while count + self._batch_size < len(self.y):
            classes = np.random.choice(
                self._classes, self.n_classes, replace=False, p=self._distribution
            )

            batch_indexes = []
            for class_index in classes:
                class_counter = self._class_counter[class_index]
                class_indexes = self._class_to_indexes[class_index]

                # Take the next n_samples from this class's shuffled pool.
                class_batch_indexes = class_indexes[class_counter:class_counter + self.n_samples]
                batch_indexes.extend(class_batch_indexes)

                self._class_counter[class_index] += self.n_samples

                if self._class_counter[class_index] + self.n_samples > len(
                    self._class_to_indexes[class_index]
                ):
                    np.random.shuffle(self._class_to_indexes[class_index])
                    self._class_counter[class_index] = 0

            yield batch_indexes

            count += self.n_classes * self.n_samples

    def __len__(self):
        return len(self.y) // self._batch_size
| 6,690 | 31.014354 | 97 | py |
AFC | AFC-master/inclearn/lib/losses/base.py | import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def binarize_and_smooth_labels(T, nb_classes, smoothing_const=0.1):
    """One-hot encode sparse targets and apply label smoothing.

    :param T: Tensor of sparse integer targets.
    :param nb_classes: Total number of classes (one-hot width).
    :param smoothing_const: Mass removed from the true class and spread
                            uniformly over the others.
    :return: FloatTensor of smoothed one-hot targets.
    """
    import sklearn.preprocessing

    onehot = sklearn.preprocessing.label_binarize(
        T.cpu().numpy(), classes=range(0, nb_classes)
    )
    smoothed = onehot * (1 - smoothing_const)
    # Zero entries receive the smoothing mass shared by the other classes.
    smoothed[smoothed == 0] = smoothing_const / (nb_classes - 1)

    return torch.FloatTensor(smoothed)
def cross_entropy_teacher_confidence(similarities, targets, old_confidence, memory_indexes):
    """Cross-entropy that up-weights memory samples the old model found hard.

    Memory samples whose old-model confidence on the true class is <= 0.5 get
    their loss scaled by ``2 * (1 + (1 - confidence))``.

    :param similarities: Logits, shape (batch, n_classes).
    :param targets: Sparse integer targets, shape (batch,).
    :param old_confidence: Old model's per-class confidence, shape (batch, n_classes).
    :param memory_indexes: 1/0 flags marking rehearsal-memory samples.
    :return: Scalar loss.
    """
    # Bug fix: use a bool mask. With the previous ``.byte()`` (uint8) mask,
    # ``~memory_indexes`` is a *bitwise* not in modern PyTorch (0/1 -> 255/254,
    # both non-zero), so the "new samples" selection silently picked everything.
    memory_indexes = memory_indexes.bool()

    per_sample_losses = F.cross_entropy(similarities, targets, reduction="none")

    memory_losses = per_sample_losses[memory_indexes]
    new_losses = per_sample_losses[~memory_indexes]

    memory_old_confidence = old_confidence[memory_indexes]
    memory_targets = targets[memory_indexes]

    # Old model's confidence on the *correct* class of each memory sample.
    right_old_confidence = memory_old_confidence[torch.arange(memory_old_confidence.shape[0]),
                                                 memory_targets]
    hard_indexes = right_old_confidence.le(0.5)

    factors = 2 * (1 + (1 - right_old_confidence[hard_indexes]))

    loss = torch.mean(
        torch.cat(
            (new_losses, memory_losses[~hard_indexes], memory_losses[hard_indexes] * factors)
        )
    )

    return loss
def nca(
    similarities,
    targets,
    class_weights=None,
    focal_gamma=None,
    scale=1,
    margin=0.,
    exclude_pos_denominator=True,
    hinge_proxynca=False,
    memory_flags=None,
):
    """Compute AMS cross-entropy loss.

    Reference:
        * Goldberger et al.
          Neighbourhood components analysis.
          NeuriPS 2005.
        * Feng Wang et al.
          Additive Margin Softmax for Face Verification.
          Signal Processing Letters 2018.

    :param similarities: Result of cosine similarities between weights and features.
    :param targets: Sparse targets.
    :param class_weights: Optional per-class weights applied to the losses.
    :param focal_gamma: Unused, kept for interface compatibility.
    :param scale: Multiplicative factor, can be learned.
    :param margin: Margin applied on the "right" (numerator) similarities.
    :param exclude_pos_denominator: NCA-style removal of the positive term
        from the denominator.
    :param hinge_proxynca: Clamp per-sample losses to be non-negative.
    :param memory_flags: Flags indicating memory samples, although it could indicate
        anything else. Unused here.
    :return: A float scalar loss.
    """
    margins = torch.zeros_like(similarities)
    margins[torch.arange(margins.shape[0]), targets] = margin
    # Subtract the margin only from the ground-truth similarity (AMS). The
    # previous code subtracted the *scalar* margin from every logit — a no-op
    # for softmax-based losses — and left the `margins` tensor unused.
    similarities = scale * (similarities - margins)

    if exclude_pos_denominator:  # NCA-specific
        similarities = similarities - similarities.max(1)[0].view(-1, 1)  # Stability

        disable_pos = torch.zeros_like(similarities)
        disable_pos[torch.arange(len(similarities)),
                    targets] = similarities[torch.arange(len(similarities)), targets]

        numerator = similarities[torch.arange(similarities.shape[0]), targets]
        denominator = similarities - disable_pos

        losses = numerator - torch.log(torch.exp(denominator).sum(-1))
        if class_weights is not None:
            losses = class_weights[targets] * losses

        losses = -losses
        if hinge_proxynca:
            losses = torch.clamp(losses, min=0.)

        loss = torch.mean(losses)
        return loss

    return F.cross_entropy(similarities, targets, weight=class_weights, reduction="mean")
def embeddings_similarity(features_a, features_b):
    """Cosine-embedding loss pulling each row of `features_a` toward the matching row of `features_b`."""
    batch_size = features_a.shape[0]
    pair_targets = torch.ones(batch_size).to(features_a.device)
    return F.cosine_embedding_loss(features_a, features_b, pair_targets)
def logit_distil(features_a, features_b, factor):
    """Weighted squared distance between L2-normalized feature vectors.

    Each dimension's squared difference is scaled by `factor`, summed per
    sample, then averaged over the batch.
    """
    norm_a = F.normalize(features_a, dim=1, p=2)
    norm_b = F.normalize(features_b, dim=1, p=2)

    squared_diff = torch.pow(norm_a - norm_b, 2)
    weighted = factor.reshape([1, -1]) * squared_diff
    return weighted.sum(dim=1).mean()
def ucir_ranking(logits, targets, n_classes, task_size, nb_negatives=2, margin=0.2):
    """Hinge loss from UCIR.

    For every sample belonging to an *old* class, ranks its ground-truth logit
    above the top-k logits of the new classes by at least ``margin``.

    Taken from: https://github.com/hshustc/CVPR19_Incremental_Learning

    # References:
        * Learning a Unified Classifier Incrementally via Rebalancing
          Hou et al.
          CVPR 2019

    :param logits: Raw scores of shape (batch, total_classes).
    :param targets: Sparse targets of shape (batch,).
    :param n_classes: Unused here, kept for interface compatibility.
    :param task_size: Number of classes introduced by the current task.
    :param nb_negatives: Number of top new-class scores used as negatives.
    :param margin: Ranking margin between ground-truth and negative scores.
    :return: A float scalar loss (0 when the batch holds no old-class sample).
    """
    # Boolean one-hot mask of the ground-truth entry of each sample.
    gt_index = torch.zeros(logits.size()).to(logits.device)
    gt_index = gt_index.scatter(1, targets.view(-1, 1), 1).ge(0.5)
    gt_scores = logits.masked_select(gt_index)
    # get top-K scores on novel classes
    num_old_classes = logits.shape[1] - task_size
    max_novel_scores = logits[:, num_old_classes:].topk(nb_negatives, dim=1)[0]
    # the index of hard samples, i.e., samples of old classes
    hard_index = targets.lt(num_old_classes)
    hard_num = torch.nonzero(hard_index).size(0)

    if hard_num > 0:
        # Repeat each ground-truth score so it is ranked against each of its
        # nb_negatives novel-class scores.
        gt_scores = gt_scores[hard_index].view(-1, 1).repeat(1, nb_negatives)
        max_novel_scores = max_novel_scores[hard_index]
        assert (gt_scores.size() == max_novel_scores.size())
        assert (gt_scores.size(0) == hard_num)

        # Target ones => gt score must exceed the novel score by `margin`.
        loss = nn.MarginRankingLoss(margin=margin)(gt_scores.view(-1, 1), \
            max_novel_scores.view(-1, 1), torch.ones(hard_num*nb_negatives).to(logits.device))
        return loss

    return torch.tensor(0).float()
| 5,013 | 33.819444 | 94 | py |
AFC | AFC-master/inclearn/lib/losses/unsupervised.py | import torch
from torch.nn import functional as F
def unsupervised_rotations(inputs, memory_flags, network, apply_on="all", factor=1.0, **kwargs):
    """Rotates inputs by 90° four times, and predict the angles.

    References:
        * Spyros Gidaris, Praveer Singh, Nikos Komodakis
          Unsupervised Representation Learning by Predicting Image Rotations
          ICLR 2018

    :param inputs: Inputs images.
    :param memory_flags: Flags signaling if the inputs are from the memory.
    :param network: Network used to predict rotations.
    :param apply_on: Which samples to rotate ("all", "old" or "new").
    :param factor: Multiplicative factor applied to the loss.
    :return: A tuple (scalar loss, network outputs).
        NOTE(review): when no sample is selected only a bare scalar tensor is
        returned, not a tuple — confirm callers handle that case.
    """
    if apply_on == "all":
        selected_inputs = inputs
    elif apply_on == "old":
        selected_inputs = inputs[memory_flags.eq(1.)]
    elif apply_on == "new":
        selected_inputs = inputs[memory_flags.eq(0.)]
    else:
        raise ValueError("Invalid apply for rotation prediction: {}.".format(apply_on))

    nb_selected = len(selected_inputs)
    if nb_selected == 0:
        return torch.tensor(0.)

    # Angle 0 is the un-rotated batch; 1..3 are successive 90° rotations.
    rotated_inputs = [selected_inputs]
    angles = [torch.zeros(nb_selected)]
    for quarter_turns in range(1, 4):
        rotated_inputs.append(selected_inputs.rot90(quarter_turns, (2, 3)))
        angles.append(torch.ones(nb_selected) * quarter_turns)

    rotated_inputs = torch.cat(rotated_inputs)
    angles = torch.cat(angles).long().to(inputs.device)

    outputs = network(rotated_inputs, rotation=True, index=len(inputs))

    loss = factor * F.cross_entropy(outputs["rotations"], angles)
    return loss, outputs
| 1,553 | 34.318182 | 96 | py |
AFC | AFC-master/inclearn/lib/losses/metrics.py | import itertools
import numpy as np
import torch
from torch.nn import functional as F
def triplet_loss(
    features,
    targets,
    squaredl2=False,
    triplet_selection="all",
    margin=0.2,
    factor=1.,
    normalize=False,
    aggreg="mean",
    harmonic_embeddings=None,
    old_features=None,
    memory_flags=None,
    epoch_percent=None
):
    """Triplet loss, reducing distance between two similar samples & maximizing distances with a third
    dissimilar sample.

    References:
        * Deep metric learning using Triplet network
          Hoffer et al.
          2014
        * Deep Triplet Ranking Networks for One-Shot Recognition
          Meng et al.
          2018
        * Facenet: A unified embedding for face recognition and clustering
          Schroff et al.
          CVPR 2015.
        * (AdaMine) Cross-Modal Retrieval in the Cooking Context: Learning
          Semantic Text-Image Embeddings
          Carvalho et al.
          2018

    :param features: A batch of 1d features.
    :param targets: Sparse targets.
    :param squaredl2: Use squared L2 in the pairwise distance matrix.
    :param triplet_selection: Mining strategy: "all", "hard" or "all_hard".
    :param margin: Margin to push negative far appart.
    :param factor: A float factor multiplied by the loss.
    :param normalize: L2-normalize features beforehand.
    :param aggreg: Aggregation method for every triplets.
    :param harmonic_embeddings: Optional config dict to mix in old-model features.
    :param old_features: Old-model features, used with `harmonic_embeddings`.
    :param memory_flags: Flags signaling rehearsal-memory samples.
    :param epoch_percent: Training progress in [0, 1]; only used by "all_hard"
        to interpolate between batch-all and batch-hard mining.
    :return: A tuple (scalar loss, integer percentage of violated triplets).
    """
    if normalize:
        features = F.normalize(features, dim=1, p=2)

    if harmonic_embeddings and old_features is not None:
        # Augment the batch with old-model embeddings so triplets can span
        # both feature spaces.
        if harmonic_embeddings["select"] == "old":
            old_features = old_features[memory_flags.eq(1.)]
            old_targets = targets[memory_flags.eq(1.)]
        elif harmonic_embeddings["select"] == "all":
            old_targets = targets
        else:
            raise ValueError(
                "Unknown harmonic embeddings selection {}.".format(harmonic_embeddings["select"])
            )

        features = torch.cat((features, old_features))
        targets = torch.cat((targets, old_targets))

    # Generate a distance matrix of shape (batch_size, batch_size).
    # The diagonal is obviously null.
    distance_matrix = _pairwise_distance(features, squared=squaredl2)

    if triplet_selection == "all":
        triplet_losses = _select_all_triplets(
            distance_matrix, _get_triplet_mask(targets), margin=margin
        )
        loss = _aggreg_triplet_losses(triplet_losses, aggreg=aggreg)
    elif triplet_selection == "hard":
        triplet_losses = _select_hardest_triplets(distance_matrix, targets, margin=margin)
        loss = _aggreg_triplet_losses(triplet_losses, aggreg=aggreg)
    elif triplet_selection == "all_hard":
        # Blend batch-all (dominant early) with batch-hard (dominant late).
        triplet_losses = _select_all_triplets(
            distance_matrix, _get_triplet_mask(targets), margin=margin
        )
        loss_all = _aggreg_triplet_losses(triplet_losses, aggreg=aggreg)

        triplet_losses_hard = _select_hardest_triplets(distance_matrix, targets, margin=margin)
        loss_hard = _aggreg_triplet_losses(triplet_losses_hard, aggreg=aggreg)

        loss = (1 - epoch_percent) * loss_all + epoch_percent * loss_hard
    else:
        raise ValueError("Unknown triplet selection {}.".format(triplet_selection))

    # NOTE(review): for "all_hard" the violation percentage is computed on the
    # batch-all losses only.
    return factor * loss, _get_per_violated_margin(triplet_losses)
# -----------------
# Private functions
# -----------------
# Selecting
def _select_all_triplets(distance_matrix, triplet_mask, margin=0.2):
"""
See:
* https://omoindrot.github.io/triplet-loss
"""
anchor_positive_dist = distance_matrix.unsqueeze(2)
anchor_negative_dist = distance_matrix.unsqueeze(1)
# Compute a 3D tensor of size (batch_size, batch_size, batch_size)
# triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
# Uses broadcasting where the 1st argument has shape (batch_size, batch_size, 1)
# and the 2nd (batch_size, 1, batch_size)
if margin == "soft":
all_triplets = torch.log(1 + torch.exp(anchor_positive_dist - anchor_negative_dist))
else:
all_triplets = anchor_positive_dist - anchor_negative_dist + margin
# Remove the invalid triplets
# (where label(a) != label(p) or label(n) == label(a) or a == p)
valid_triplets = all_triplets[triplet_mask]
# Remove negative losses (i.e. the easy triplets)
pos_triplets = valid_triplets.clamp(min=0.)
return pos_triplets
def _select_hardest_triplets(distance_matrix, targets, margin=0.2):
    """Batch-hard triplet mining: hardest positive & hardest negative per anchor.

    See:
        * https://omoindrot.github.io/triplet-loss

    :param distance_matrix: Pairwise distances, shape (batch, batch).
    :param targets: Sparse targets, shape (batch,).
    :param margin: Hinge margin.
    :return: Per-anchor clamped losses, shape (batch, 1).
    """
    # For each anchor, get the hardest positive
    # First, we need to get a mask for every valid positive (they should have same label)
    mask_anchor_positive = _get_anchor_positive_triplet_mask(targets).float()

    # We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
    anchor_positive_dist = mask_anchor_positive * distance_matrix
    # shape (batch_size, 1)
    # NOTE: `keepdim` (not numpy-style `keepdims`) is the documented kwarg.
    hardest_positive_dist = anchor_positive_dist.max(dim=1, keepdim=True)[0]

    # For each anchor, get the hardest negative
    # First, we need to get a mask for every valid negative (they should have different labels)
    mask_anchor_negative = _get_anchor_negative_triplet_mask(targets).float()

    # We add the maximum value in each row to the invalid negatives (label(a) == label(n))
    max_anchor_negative_dist = distance_matrix.max(dim=1, keepdim=True)[0]
    anchor_negative_dist = distance_matrix + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
    # shape (batch_size,)
    hardest_negative_dist = anchor_negative_dist.min(dim=1, keepdim=True)[0]

    # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
    triplet_losses = torch.clamp(hardest_positive_dist - hardest_negative_dist + margin, min=0.)

    return triplet_losses
# -----------------
# Masking functions
# -----------------
def _get_triplet_mask(targets):
"""Generates a mask (anchor, positive, negative).
Taken from:
https://github.com/omoindrot/tensorflow-triplet-loss/blob/master/model/triplet_loss.py
"""
indexes_not_equal = ~torch.eye(len(targets)).bool().to(targets.device)
i_not_j = indexes_not_equal.unsqueeze(2)
i_not_k = indexes_not_equal.unsqueeze(1)
j_not_k = indexes_not_equal.unsqueeze(0)
distinct_indexes = (i_not_j & i_not_k) & j_not_k
labels_equal = targets.unsqueeze(0) == targets.unsqueeze(1)
i_eq_j = labels_equal.unsqueeze(2)
i_eq_k = labels_equal.unsqueeze(1)
valid_labels = i_eq_j & (~i_eq_k)
mask = distinct_indexes & valid_labels
return mask
def _get_anchor_positive_triplet_mask(targets):
"""
Taken from:
https://github.com/omoindrot/tensorflow-triplet-loss/blob/master/model/triplet_loss.py
"""
indexes_not_equal = ~torch.eye(len(targets)).bool().to(targets.device)
# Check if labels[i] == labels[j]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = targets.unsqueeze(0) == targets.unsqueeze(1)
# Combine the two masks
mask = indexes_not_equal & labels_equal
return mask
def _get_anchor_negative_triplet_mask(targets):
"""
Taken from:
https://github.com/omoindrot/tensorflow-triplet-loss/blob/master/model/triplet_loss.py
"""
# Check if labels[i] != labels[k]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = targets.unsqueeze(0) == targets.unsqueeze(1)
mask = ~labels_equal
return mask
# ----
# Misc
# ----
def _get_per_violated_margin(triplet_losses):
nb_total = len(triplet_losses)
nb_violated = len(triplet_losses[triplet_losses > 1e-8])
return int(100 * nb_violated / nb_total)
def _aggreg_triplet_losses(triplet_losses, aggreg="mean"):
if aggreg == "mean":
return triplet_losses.mean()
elif aggreg == "max":
return triplet_losses.max()
elif aggreg == "adamine":
nb_not_null = len(triplet_losses[triplet_losses > 0.])
return triplet_losses.sum() / nb_not_null
raise ValueError("Unknown aggregation method {}.".format(aggreg))
def _triplet(pos_distance, neg_distance, margin, aggreg="mean"):
triplets = torch.clamp(margin + pos_distance - neg_distance, min=0.)
if aggreg == "mean":
return torch.mean(triplets)
elif aggreg == "sum":
return torch.sum(triplets)
elif aggreg == "adamine":
return torch.sum(triplets) / max(len(triplets[triplets > 0]), 1)
raise ValueError("Unknown aggregation method for triplet: {}.".format(aggreg))
def _triplet_facenet_sampling(features, targets, semihard=True, distance="l2squared"):
    """Build (anchor, positive, negative) index tensors à la FaceNet.

    For every positive pair of a class, either mine a semi-hard negative (a
    negative still further than the positive, taking the closest such one) or
    draw negatives at random.

    :param features: A batch of flat features.
    :param targets: Sparse targets.
    :param semihard: Use semi-hard negative mining instead of random negatives.
    :param distance: "l2squared" for squared euclidean distances, otherwise plain L2.
    :return: Three tensors of anchor, positive and negative indexes.
    """
    # Forgive me for this code...

    # Generate a distance matrix of shape (batch_size, batch_size).
    # The diagonal is obviously null.
    # Fix: `_dense_distance` is not defined anywhere in this module (calling
    # this function raised NameError); use the pairwise-matrix helper with the
    # squared flag derived from `distance`.
    pairwise_distances = _pairwise_distance(features, squared=(distance == "l2squared"))

    anchor_indexes, positive_indexes, negative_indexes = [], [], []

    targets = targets.cpu().numpy()
    for target in set(targets.tolist()):
        indexes = np.where(targets == target)[0].tolist()
        neg_indexes = np.where(targets != target)[0].tolist()

        positive_pairs = list(itertools.combinations(indexes, 2))
        _anchors = torch.tensor([pair[0] for pair in positive_pairs])
        _positives = torch.tensor([pair[1] for pair in positive_pairs])

        if semihard:
            ap_dist = pairwise_distances[_anchors, _positives]

            nb_pos = len(indexes)
            nb_neg = len(targets) - nb_pos

            an_dist = pairwise_distances[torch.tensor(indexes).repeat_interleave(nb_neg, 0),
                                         torch.tensor(neg_indexes).repeat(1, nb_pos)[0]]

            anchors = []
            positives = []
            negatives = []
            for i in range(len(ap_dist)):
                # Semi-hard: among negatives further than the positive, take
                # the closest one.
                if (ap_dist[i] < an_dist[i]).any():
                    negatives.append(
                        neg_indexes[(an_dist[i] == an_dist[i][ap_dist[i] < an_dist[i]].min()
                                    ).argmax().item()]
                    )
                    positives.append(_positives[i])
                    anchors.append(_anchors[i])
        else:
            negatives = np.random.choice(neg_indexes, size=len(_anchors), replace=False).tolist()
            anchors = _anchors.tolist()
            positives = _positives.tolist()

        assert len(negatives) == len(anchors) == len(positives)
        anchor_indexes.extend(anchors)
        positive_indexes.extend(positives)
        negative_indexes.extend(negatives)

    return torch.tensor(anchor_indexes), torch.tensor(positive_indexes
                                                     ), torch.tensor(negative_indexes)
def _triplet_random_sampling(features, targets):
anchor_indexes, pos_indexes, neg_indexes = [], [], []
targets = targets.cpu().numpy()
for target in targets:
target_indexes = np.where(target == targets)[0]
poss = np.random.choice(target_indexes, size=2, replace=len(target_indexes) < 2)
neg = np.random.choice(np.where(target != targets)[0], size=1)
anchor_indexes.append(poss[0])
pos_indexes.append(poss[1])
neg_indexes.append(neg[0])
assert len(anchor_indexes) == len(pos_indexes) == len(neg_indexes)
anchor_indexes = torch.tensor(anchor_indexes)
pos_indexes = torch.tensor(pos_indexes)
neg_indexes = torch.tensor(neg_indexes)
return anchor_indexes, pos_indexes, neg_indexes
def _pairwise_distance(a, squared=False):
"""Computes the pairwise distance matrix with numerical stability."""
pairwise_distances_squared = torch.add(
a.pow(2).sum(dim=1, keepdim=True).expand(a.size(0), -1),
torch.t(a).pow(2).sum(dim=0, keepdim=True).expand(a.size(0), -1)
) - 2 * (torch.mm(a, torch.t(a)))
# Deal with numerical inaccuracies. Set small negatives to zero.
pairwise_distances_squared = torch.clamp(pairwise_distances_squared, min=0.0)
# Get the mask where the zero distances are at.
error_mask = torch.le(pairwise_distances_squared, 0.0)
# Optionally take the sqrt.
if squared:
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = torch.sqrt(pairwise_distances_squared + error_mask.float() * 1e-16)
# Undo conditionally adding 1e-16.
pairwise_distances = torch.mul(pairwise_distances, (error_mask == False).float())
# Explicitly set diagonals to zero.
mask_offdiagonals = 1 - torch.eye(*pairwise_distances.size(), device=pairwise_distances.device)
pairwise_distances = torch.mul(pairwise_distances, mask_offdiagonals)
return pairwise_distances
def _pair_distance(a, b, distance_type="l2"):
if distance_type == "l2":
return F.pairwise_distance(a, b, p=2)
if distance_type == "l2squared":
return torch.pow(F.pairwise_distance(a, b, p=2), 2)
elif distance_type == "l1":
return F.pairwise_distance(a, b, p=1)
elif distance_type == "cosine":
return 1 - torch.cosine_similarity(a, b)
raise ValueError("Unknown distance type {}.".format(distance_type))
| 13,211 | 34.045093 | 102 | py |
AFC | AFC-master/inclearn/lib/losses/distillation.py | import functools
import math
import torch
from torch.nn import functional as F
from inclearn.lib import vizualization
def mer_loss(new_logits, old_logits):
    """Distillation loss that is less important if the new model is unconfident.

    Reference:
        * Kim et al.
          Incremental Learning with Maximum Entropy Regularization: Rethinking
          Forgetting and Intransigence.

    :param new_logits: Logits from the new (student) model.
    :param old_logits: Logits from the old (teacher) model.
    :return: A float scalar loss.
    """
    new_probs = F.softmax(new_logits, dim=-1)
    old_probs = F.softmax(old_logits, dim=-1)

    delta = new_probs - old_probs
    per_sample = (delta * new_probs.log()).sum(dim=-1)
    return per_sample.mean(dim=0)
def pod(
    list_attentions_a,
    list_attentions_b,
    collapse_channels="spatial",
    normalize=True,
    memory_flags=None,
    only_old=False,
    feature_distil_factor=None,
    **kwargs
):
    """Pooled Output Distillation.

    Reference:
        * Douillard et al.
          Small Task Incremental Learning.
          arXiv 2020.

    :param list_attentions_a: A list of attention maps, each of shape (b, n, w, h).
    :param list_attentions_b: A list of attention maps, each of shape (b, n, w, h).
    :param collapse_channels: How to pool the channels ("channels", "width",
        "height", "gap", "spatial" or "pixel").
    :param normalize: L2-normalize the pooled descriptors before comparing.
    :param memory_flags: Integer flags denoting exemplars.
    :param only_old: Only apply loss to exemplars.
    :param feature_distil_factor: Optional per-layer weighting of the loss.
    :return: A float scalar loss.
    """
    assert len(list_attentions_a) == len(list_attentions_b)

    loss = torch.tensor(0.).to(list_attentions_a[0].device)
    for i, (a, b) in enumerate(zip(list_attentions_a, list_attentions_b)):
        # shape of (b, n, w, h)
        assert a.shape == b.shape, (a.shape, b.shape)

        if only_old:
            a = a[memory_flags]
            b = b[memory_flags]
            if len(a) == 0:
                continue

        # Square activations so pooled values reflect energy, not sign.
        a = torch.pow(a, 2)
        b = torch.pow(b, 2)

        if collapse_channels == "channels":
            a = a.sum(dim=1).view(a.shape[0], -1)  # shape of (b, w * h)
            b = b.sum(dim=1).view(b.shape[0], -1)
        elif collapse_channels == "width":
            a = a.sum(dim=2).view(a.shape[0], -1)  # shape of (b, c * h)
            b = b.sum(dim=2).view(b.shape[0], -1)
        elif collapse_channels == "height":
            a = a.sum(dim=3).view(a.shape[0], -1)  # shape of (b, c * w)
            b = b.sum(dim=3).view(b.shape[0], -1)
        elif collapse_channels == "gap":
            # Global average pooling down to one value per channel.
            a = F.adaptive_avg_pool2d(a, (1, 1))[..., 0, 0]
            b = F.adaptive_avg_pool2d(b, (1, 1))[..., 0, 0]
        elif collapse_channels == "spatial":
            # POD-spatial: concatenation of height-pooled and width-pooled maps.
            a_h = a.sum(dim=3).view(a.shape[0], -1)
            b_h = b.sum(dim=3).view(b.shape[0], -1)
            a_w = a.sum(dim=2).view(a.shape[0], -1)
            b_w = b.sum(dim=2).view(b.shape[0], -1)
            a = torch.cat([a_h, a_w], dim=-1)
            b = torch.cat([b_h, b_w], dim=-1)
        elif collapse_channels == "pixel":
            # No pooling, only flattening of the spatial dimensions.
            a = a.view(a.shape[0], a.shape[1], -1)
            b = b.view(b.shape[0], b.shape[1], -1)
        else:
            raise ValueError("Unknown method to collapse: {}".format(collapse_channels))

        if normalize:
            if collapse_channels == "pixel":
                a = F.normalize(a, dim=2, p=2)
                b = F.normalize(b, dim=2, p=2)
            else:
                a = F.normalize(a, dim=1, p=2)
                b = F.normalize(b, dim=1, p=2)

        if feature_distil_factor is None:
            layer_loss = torch.mean(torch.frobenius_norm(a - b, dim=-1))
        else:
            # Per-layer importance factor (AFC-style weighting).
            factor = feature_distil_factor[i].reshape([1,-1])
            layer_loss = torch.mean(factor * torch.frobenius_norm(a - b, dim=-1))
        loss += layer_loss

    # Average over the number of compared layers.
    return loss / len(list_attentions_a)
def spatial_pyramid_pooling(
list_attentions_a,
list_attentions_b,
levels=[1, 2],
pool_type="avg",
weight_by_level=True,
normalize=True,
**kwargs
):
loss = torch.tensor(0.).to(list_attentions_a[0].device)
for i, (a, b) in enumerate(zip(list_attentions_a, list_attentions_b)):
# shape of (b, n, w, h)
assert a.shape == b.shape
a = torch.pow(a, 2)
b = torch.pow(b, 2)
for j, level in enumerate(levels):
if level > a.shape[2]:
raise ValueError(
"Level {} is too big for spatial dim ({}, {}).".format(
level, a.shape[2], a.shape[3]
)
)
kernel_size = level // level
if pool_type == "avg":
a_pooled = F.avg_pool2d(a, (kernel_size, kernel_size))
b_pooled = F.avg_pool2d(b, (kernel_size, kernel_size))
elif pool_type == "max":
a_pooled = F.max_pool2d(a, (kernel_size, kernel_size))
b_pooled = F.max_pool2d(b, (kernel_size, kernel_size))
else:
raise ValueError("Invalid pool type {}.".format(pool_type))
a_features = a_pooled.view(a.shape[0], -1)
b_features = b_pooled.view(b.shape[0], -1)
if normalize:
a_features = F.normalize(a_features, dim=-1)
b_features = F.normalize(b_features, dim=-1)
level_loss = torch.frobenius_norm(a_features - b_features, dim=-1).mean(0)
if weight_by_level: # Give less importance for smaller cells.
level_loss *= 1 / 2**j
loss += level_loss
return loss
def relative_teacher_distances(features_a, features_b, normalize=False, distance="l2", **kwargs):
    """Distillation loss between the teacher and the student comparing distances
    instead of embeddings.

    Reference:
        * Lu Yu et al.
          Learning Metrics from Teachers: Compact Networks for Image Embedding.
          CVPR 2019.

    :param features_a: ConvNet features of a model.
    :param features_b: ConvNet features of a model.
    :param normalize: L2-normalize features first.
    :param distance: "l2" or "l1" pairwise distance.
    :return: A float scalar loss.
    """
    if normalize:
        features_a = F.normalize(features_a, dim=-1, p=2)
        features_b = F.normalize(features_b, dim=-1, p=2)

    if distance == "l2":
        norm_order = 2
    elif distance == "l1":
        norm_order = 1
    else:
        raise ValueError("Invalid distance for relative teacher {}.".format(distance))

    # Compare the two intra-batch distance structures, not the embeddings.
    gaps = torch.pdist(features_a, p=norm_order) - torch.pdist(features_b, p=norm_order)
    return gaps.abs().mean()
def perceptual_features_reconstruction(list_attentions_a, list_attentions_b, factor=1.):
    """Perceptual (feature) reconstruction loss between two lists of attention maps."""
    total = 0.
    for a, b in zip(list_attentions_a, list_attentions_b):
        bs, c, w, h = a.shape

        # Flatten each map to (b, c * w * h) and compare unit-normalized versions.
        flat_a = F.normalize(a.reshape(bs, -1), p=2, dim=-1)
        flat_b = F.normalize(b.reshape(bs, -1), p=2, dim=-1)

        per_sample = torch.pow(F.pairwise_distance(flat_a, flat_b, p=2), 2) / (c * w * h)
        total += per_sample.mean()

    return factor * (total / len(list_attentions_a))
def perceptual_style_reconstruction(list_attentions_a, list_attentions_b, factor=1.):
    """Style (Gram-matrix) reconstruction loss between two lists of attention maps."""
    total = 0.
    for a, b in zip(list_attentions_a, list_attentions_b):
        bs, c, w, h = a.shape

        a = a.view(bs, c, w * h)
        b = b.view(bs, c, w * h)

        # Normalized Gram matrices capture channel correlations ("style").
        gram_a = torch.bmm(a, a.transpose(2, 1)) / (c * w * h)
        gram_b = torch.bmm(b, b.transpose(2, 1)) / (c * w * h)

        total += torch.pow(torch.frobenius_norm(gram_a - gram_b, dim=(1, 2)), 2).mean()

    return factor * (total / len(list_attentions_a))
def gradcam_distillation(gradients_a, gradients_b, activations_a, activations_b, factor=1):
    """Distillation loss between gradcam-generated attentions of two models.

    References:
        * Dhar et al.
          Learning without Memorizing
          CVPR 2019

    :param gradients_a: Gradients w.r.t. the activations of model a.
    :param gradients_b: Gradients w.r.t. the activations of model b.
    :param activations_a: Activation maps of model a, shape (b, c, w, h).
    :param activations_b: Activation maps of model b, shape (b, c, w, h).
    :param factor: Multiplicative factor applied to the loss.
    :return: A float scalar loss.
    """
    attention_a = _compute_gradcam_attention(gradients_a, activations_a)
    attention_b = _compute_gradcam_attention(gradients_b, activations_b)

    assert len(attention_a.shape) == len(attention_b.shape) == 4
    assert attention_a.shape == attention_b.shape

    bs = attention_a.shape[0]
    flat_a = F.normalize(attention_a.view(bs, -1), p=2, dim=-1)
    flat_b = F.normalize(attention_b.view(bs, -1), p=2, dim=-1)

    # Per-sample L1 distance between the normalized attention maps.
    per_sample = torch.abs(flat_a - flat_b).sum(-1)
    return factor * per_sample.mean()
def _compute_gradcam_attention(gradients, activations):
alpha = F.adaptive_avg_pool2d(gradients, (1, 1))
return F.relu(alpha * activations)
def mmd(x, y, sigmas=(1, 5, 10), normalize=False):
    """Maximum Mean Discrepancy with several Gaussian kernels.

    :param x: A batch of features, flattened to (batch, -1).
    :param y: A batch of features, flattened to (batch, -1).
    :param sigmas: Kernel bandwidths; when empty, a single bandwidth is
        derived from the mean squared pairwise x-y distance.
    :param normalize: L2-normalize the flattened features first.
    :return: A float scalar, sqrt of the (biased) squared-MMD estimate.
    """
    # Flatten:
    x = x.view(x.shape[0], -1)
    y = y.view(y.shape[0], -1)

    if len(sigmas) == 0:
        mean_dist = torch.mean(torch.pow(torch.pairwise_distance(x, y, p=2), 2))
        factors = (-1 / (2 * mean_dist)).view(1, 1, 1)
    else:
        # tuple(): _get_mmd_factor is lru_cache'd, and list arguments are not
        # hashable — passing the old list default raised TypeError. The
        # default itself is now a tuple for the same reason (and to avoid a
        # mutable default argument).
        factors = _get_mmd_factor(tuple(sigmas), x.device)

    if normalize:
        x = F.normalize(x, p=2, dim=1)
        y = F.normalize(y, p=2, dim=1)

    xx = torch.pairwise_distance(x, x, p=2)**2
    yy = torch.pairwise_distance(y, y, p=2)**2
    xy = torch.pairwise_distance(x, y, p=2)**2

    div = 1 / (x.shape[1]**2)

    k_xx = div * torch.exp(factors * xx).sum(0).squeeze()
    k_yy = div * torch.exp(factors * yy).sum(0).squeeze()
    k_xy = div * torch.exp(factors * xy).sum(0).squeeze()

    mmd_sq = torch.sum(k_xx) - 2 * torch.sum(k_xy) + torch.sum(k_yy)
    return torch.sqrt(mmd_sq)
@functools.lru_cache(maxsize=1, typed=False)
def _get_mmd_factor(sigmas, device):
sigmas = torch.tensor(sigmas)[:, None, None].to(device).float()
sigmas = -1 / (2 * sigmas)
return sigmas
def similarity_per_class(
    features,
    targets,
    goal_features,
    goal_targets,
    epoch,
    epochs,
    memory_flags,
    old_centroids_features=None,
    old_centroids_targets=None,
    factor=1.,
    scheduled=False,
    apply_centroids=True,
    initial_centroids=False
):
    """Pull new-class features toward per-class goal features via cosine similarity.

    :param features: Batch features, shape (batch, feature_dim).
    :param targets: Sparse targets for the batch.
    :param goal_features: Reference features the batch should resemble.
    :param goal_targets: Sparse targets of the reference features.
    :param epoch: Current epoch, used when `scheduled`.
    :param epochs: Total number of epochs, used when `scheduled`.
    :param memory_flags: Flags (1 = rehearsal-memory sample); memory samples are skipped.
    :param old_centroids_features: Initial centroids, used when `initial_centroids`.
    :param old_centroids_targets: Targets of the initial centroids.
    :param factor: Multiplicative factor on the loss.
    :param scheduled: Linearly decay the loss with the epoch.
    :param apply_centroids: Compare to the mean of the per-class goal features
        instead of to every goal feature.
    :param initial_centroids: Also pull features toward the old centroids.
    :return: A float scalar loss (0. when the batch holds no new-class sample).
    """
    loss = 0.
    counter = 0

    # We only keep new classes, no classes stored in memory
    indexes = ~memory_flags.bool()
    features = features[indexes]
    targets = targets[indexes].to(features.device)

    for target in torch.unique(targets):
        sub_features = features[targets == target]
        sub_goal_features = goal_features[goal_targets == target]

        if apply_centroids:
            sub_goal_features = sub_goal_features.mean(dim=0, keepdims=True)

        # We want the new real features to be similar to their old alter-ego ghosts:
        similarities = torch.mm(
            F.normalize(sub_features, dim=1, p=2),
            F.normalize(sub_goal_features, dim=1, p=2).T
        )
        loss += torch.clamp((1 - similarities).sum(), min=0.)
        counter += len(sub_features)

        if initial_centroids:
            # But we also want that the new real features stay close to what the
            # trained ConvNet though was best as first initialization:
            sub_centroids = old_centroids_features[old_centroids_targets == target]
            # NOTE(review): here the *transposed* centroids are normalized along
            # dim=1 (i.e. across centroids), unlike the branch above which
            # normalizes each vector — confirm this asymmetry is intended.
            similarities = torch.mm(
                F.normalize(sub_features, dim=1, p=2), F.normalize(sub_centroids.T, dim=1, p=2)
            )
            loss += torch.clamp((1 - similarities).sum(), min=0.)
            counter += len(sub_features)

    if counter == 0:
        return 0.

    loss = factor * (loss / counter)
    if scheduled:
        # Linear decay of the constraint as training progresses.
        loss = (1 - epoch / epochs) * loss

    if loss < 0.:
        raise ValueError(f"Negative loss value for PLC! (epoch={epoch}, epochs={epochs})")

    return loss
def semantic_drift_compensation(old_features, new_features, targets, sigma=0.2):
    """Returns SDC drift.

    Estimates, per feature dimension, how much embeddings moved between the
    old and new model as a Gaussian-weighted average of per-sample deltas:
    samples close to their class mean weigh more.

    # References:
        * Semantic Drift Compensation for Class-Incremental Learning
          Lu Yu et al.
          CVPR 2020

    :param old_features: Features of the samples under the old model.
    :param new_features: Features of the same samples under the new model.
    :param targets: Sparse targets of the samples.
    :param sigma: Bandwidth of the Gaussian weighting kernel.
    :return: The drift vector, shape (feature_dim,).
    """
    assert len(old_features) == len(new_features)

    with torch.no_grad():
        delta = new_features - old_features

        # Gaussian kernel w = exp(-d^2 / (2 * sigma^2)), per the SDC paper.
        # The previous code *divided* by this inverse factor, i.e. computed
        # exp(-d^2 * 2 * sigma^2), which flattened the weighting.
        inv_two_sigma_sq = 1 / (2 * sigma**2)

        drift = torch.zeros(new_features.shape[1]).float().to(new_features.device)
        summed_w = 0.
        for target in torch.unique(targets):
            indexes = target == targets
            old_features_class = old_features[indexes]

            # Squared distance of each sample to its class mean: samples far
            # from the mean are less representative and weigh less.
            squared_dist = old_features_class - old_features_class.mean(dim=0)
            squared_dist = torch.pow(torch.norm(squared_dist, dim=1), 2)
            w = torch.exp(-squared_dist * inv_two_sigma_sq)

            tmp = (w[..., None] * delta[indexes])
            drift = drift + tmp.sum(dim=0)
            summed_w = summed_w + w.sum()

        drift = drift / summed_w

    return drift
| 13,152 | 32.047739 | 97 | py |
AFC | AFC-master/inclearn/lib/losses/regularizations.py | import functools
import numpy as np
import torch
from torch.nn import functional as F
from inclearn.lib import utils
def weights_orthogonality(weights, margin=0.):
"""Regularization forcing the weights to be disimilar.
:param weights: Learned parameters of shape (n_classes, n_features).
:param margin: Margin to force even more the orthogonality.
:return: A float scalar loss.
"""
normalized_weights = F.normalize(weights, dim=1, p=2)
similarities = torch.mm(normalized_weights, normalized_weights.t())
# We are ignoring the diagonal made of identity similarities:
similarities = similarities[torch.eye(similarities.shape[0]) == 0]
return torch.mean(F.relu(similarities + margin))
def ortho_reg(weights, config):
"""Regularization forcing the weights to be orthogonal without removing negative
correlation.
Reference:
* Regularizing CNNs with Locally Constrained Decorrelations
Pau et al.
ICLR 2017
:param weights: Learned parameters of shape (n_classes, n_features).
:return: A float scalar loss.
"""
normalized_weights = F.normalize(weights, dim=1, p=2)
similarities = torch.mm(normalized_weights, normalized_weights.t())
# We are ignoring the diagonal made of identity similarities:
similarities = similarities[torch.eye(similarities.shape[0]) == 0]
x = config.get("lambda", 10.) * (similarities - 1.)
return config.get("factor", 1.) * torch.log(1 + torch.exp(x)).sum()
def global_orthogonal_regularization(
features, targets, factor=1., normalize=False, cosine=False
):
"""Global Orthogonal Regularization (GOR) forces features of different
classes to be orthogonal.
# Reference:
* Learning Spread-out Local Feature Descriptors.
Zhang et al.
ICCV 2016.
:param features: A flattened extracted features.
:param targets: Sparse targets.
:return: A float scalar loss.
"""
if normalize:
features = F.normalize(features, dim=1, p=2)
positive_indexes, negative_indexes = [], []
targets = targets.cpu().numpy()
unique_targets = set(targets)
if len(unique_targets) == 1:
return torch.tensor(0.)
for target in unique_targets:
positive_index = np.random.choice(np.where(targets == target)[0], 1)
negative_index = np.random.choice(np.where(targets != target)[0], 1)
positive_indexes.append(positive_index)
negative_indexes.append(negative_index)
assert len(positive_indexes) == len(negative_indexes)
if len(positive_indexes) == 0:
return 0.
positive_indexes = torch.LongTensor(positive_indexes)
negative_indexes = torch.LongTensor(negative_indexes)
positive_features = features[positive_indexes]
negative_features = features[negative_indexes]
if cosine:
similarities = F.cosine_similarity(positive_features, negative_features)
else:
similarities = torch.sum(torch.mul(positive_features, negative_features), 1)
features_dim = features.shape[1]
first_moment = torch.mean(similarities)
second_moment = torch.mean(torch.pow(similarities, 2))
loss = torch.pow(first_moment, 2) + torch.clamp(second_moment - 1. / features_dim, min=0.)
return factor * loss
def double_soft_orthoreg(weights, config):
"""Extention of the Soft Ortogonality reg, forces the Gram matrix of the
weight matrix to be close to identity.
Also called DSO.
References:
* Can We Gain More from Orthogonality Regularizations in Training Deep CNNs?
Bansal et al.
NeurIPS 2018
:param weights: Learned parameters of shape (n_classes, n_features).
:return: A float scalar loss.
"""
wTw = torch.mm(weights.t(), weights)
so_1 = torch.frobenius_norm(wTw - torch.eye(wTw.shape[0]).to(weights.device))
wwT = torch.mm(weights, weights.t())
so_2 = torch.frobenius_norm(wwT - torch.eye(wwT.shape[0]).to(weights.device))
if config["squared"]:
so_1 = torch.pow(so_1, 2)
so_2 = torch.pow(so_2, 2)
return config["factor"] * (so_1 + so_2)
def mutual_coherence_regularization(weights, config):
"""Forces weights orthogonality by reducing the highest correlation between
the weights.
Also called MC.
References:
* Compressed sensing
David L Donoho.
Transactions on information theory 2016
:param weights: Learned parameters of shape (n_classes, n_features).
:return: A float scalar loss.
"""
wTw = torch.mm(weights.t(), weights)
x = wTw - torch.eye(wTw.shape[0]).to(weights.device)
loss = utils.matrix_infinity_norm(x)
return config["factor"] * loss
def spectral_restricted_isometry_property_regularization(weights, config):
"""Requires that every set of columns of the weights, with cardinality no
larger than k, shall behave like an orthogonal system.
Also called SRIP.
References:
* Can We Gain More from Orthogonality Regularizations in Training Deep CNNs?
Bansal et al.
NeurIPS 2018
:param weights: Learned parameters of shape (n_classes, n_features).
:return: A float scalar loss.
"""
wTw = torch.mm(weights.t(), weights)
x = wTw - torch.eye(wTw.shape[0]).to(weights.device)
_, s, _ = torch.svd(x)
loss = s[0]
return config["factor"] * loss
def softriple_regularizer(weights, config):
    """SoftTriple-style regularizer over per-class proxy centers.

    Averages the pairwise Euclidean distance between the K centers of every
    class (computed on L2-normalized weights), encouraging the centers of a
    class to stay spread out.

    :param weights: Learned parameters of shape (n_classes * K, n_features).
    :param config: Dict with "K" (centers per class) and "factor".
    :return: A float scalar loss.
    """
    nb_centers = config["K"]
    normalized = F.normalize(weights)
    nb_classes = normalized.shape[0] // nb_centers
    centers = normalized.view(nb_classes, nb_centers, -1)

    # Upper-triangular (i < j) pairs of centers within one class.
    rows, cols = np.triu_indices(nb_centers, 1)
    rows, cols = torch.tensor(rows), torch.tensor(cols)

    cosine = torch.bmm(centers, centers.transpose(2, 1))
    # |2 - 2cos| guards against small negatives from numerical error.
    pair_dist = torch.sqrt(torch.abs(2 - 2 * cosine[..., rows, cols]) + 1e-10)

    denom = nb_classes * nb_centers * (nb_centers - 1)
    return config["factor"] * (pair_dist.sum() / denom)
def double_margin_constrastive_regularization(
    weights,
    current_index,
    K=None,
    intra_margin=0.2,
    inter_margin=0.8,
    regroup_intra=False,
    inter_old_vs_new=False,
    normalize=True,
    intra_aggreg="mean",
    inter_aggreg="mean",
    square=True,
    old_weights=None,
    adaptative_margin=False,
    adaptative_margin_max=2.0,
    adaptative_margin_min=0.5,
    factor=1.
):
    """To be used with multiple centers per class. Enforce that weights of different
    classes are further than a given margin intra_margin and weights of same class
    are close but still further than a margin inter_margin.

    intra_margin must be > than inter_margin.

    Note that distance range is:
        * [0, 2] if squared
        * [0, 1.41] otherwise

    Therefore while the intra_margin should be kept low, the inter_dist if set
    higher than the upper bound will force perfect orthogonality.

    :param weights: Learned parameters of shape (n_classes * n_clusters, n_features).
    :param current_index: The current weight index, i.e. if we have learned N classes, the index
                          will be N.
    :param K: Number of clusters per class.
    :param intra_margin: Margin between clusters of same class.
    :param inter_margin: Margin between clusters of different classes; may also
                         be one of the mode strings "gor" or "simi".
    :param regroup_intra: Pull same-class clusters together instead of apart.
    :param inter_old_vs_new: Apply the inter distance only between old & new.
    :param normalize: L2-normalize the weights before computing distances.
    :param intra_aggreg: Aggregation mode ("mean"/"max"/"adamine") for intra losses.
    :param inter_aggreg: Aggregation mode for inter losses.
    :param square: Use squared Euclidean distances.
    :param old_weights: Previous-task weights, used by the adaptive margin.
    :param adaptative_margin: Derive per-pair inter margins from old distances.
    :param adaptative_margin_max: Upper bound of the adaptive margin.
    :param adaptative_margin_min: Lower bound of the adaptive margin.
    :param factor: A multiplicative factor applied to the loss.
    :return: A float scalar loss.
    """
    if intra_margin is None and inter_margin is None:
        raise ValueError("At least one margin must be enabled.")

    if normalize:
        weights = F.normalize(weights)

    C = weights.shape[0] // K  # Number of classes (K clusters each).
    dist = _dmr_weights_distance(weights, square=square)

    loss = 0.

    # Intra-class term: distances between clusters of the same class.
    if intra_margin is not None and K > 1:
        intra_mask = _dmr_intra_mask(dist.shape[0], C, K, weights.device)
        intra_dist = _index_mask(dist, intra_mask)
        if regroup_intra:
            # Pull same-class clusters to within intra_margin of each other.
            intra_losses = torch.clamp(intra_dist - intra_margin, min=0.)
        else:
            # Push same-class clusters at least intra_margin apart.
            intra_losses = torch.clamp(intra_margin - intra_dist, min=0.)
        intra_loss = _dmr_aggreg(intra_losses, aggreg_mode=intra_aggreg)
        loss += intra_loss

    # Inter-class term, skipped on the first task when only old-vs-new counts.
    if inter_margin is not None and not (inter_old_vs_new and current_index == 0):
        if inter_old_vs_new:
            inter_mask = _dmr_inter_oldvsnew_mask(dist.shape[0], current_index, weights.device)
            inter_dist = dist[inter_mask]
        elif adaptative_margin and old_weights is not None:
            old_dist = _dmr_weights_distance(old_weights, square=square).to(weights.device)
            nb_old_classes = old_weights.shape[0] // K

            inter_mask_old = _dmr_inter_mask(old_dist.shape[0], nb_old_classes, K, weights.device)
            # BUG FIX: _dmr_inter_mask is lru_cache'd, so calling it twice
            # with identical arguments returns the *same* tensor object.
            # Mutating it in place corrupted the cache and made
            # inter_mask_new alias the already-mutated inter_mask_oldnew
            # (ending up all-False). Clone before mutating.
            inter_mask_oldnew = _dmr_inter_mask(dist.shape[0], C, K, weights.device).clone()
            inter_mask_oldnew[nb_old_classes * K:] = False
            inter_mask_oldnew[..., nb_old_classes * K:] = False
            inter_mask_new = _dmr_inter_mask(dist.shape[0], C, K, weights.device).clone()
            inter_mask_new[:nb_old_classes * K, :nb_old_classes * K] = False

            # Per-pair margins rescaled linearly to
            # [adaptative_margin_min, adaptative_margin_max] from the
            # inter-class distances observed among the old weights.
            old_inter_dist = _index_mask(old_dist, inter_mask_old)
            d = torch.clamp(old_inter_dist, min=0.)
            adaptative_margins = (
                (adaptative_margin_max - adaptative_margin_min) / torch.max(d)
            ) * d + adaptative_margin_min

            oldnew_inter_dist = _index_mask(dist, inter_mask_oldnew)
            new_inter_dist = _index_mask(dist, inter_mask_new)
            inter_dist = torch.cat((oldnew_inter_dist, new_inter_dist))
            inter_margin = torch.cat(
                (
                    adaptative_margins,
                    torch.tensor(inter_margin).repeat(len(new_inter_dist)).to(weights.device)
                )
            )
            assert len(oldnew_inter_dist) == len(old_inter_dist) == len(adaptative_margins)
        else:
            inter_mask = _dmr_inter_mask(dist.shape[0], C, K, weights.device)
            inter_dist = _index_mask(dist, inter_mask)

        # NOTE(review): a tensor-valued inter_margin (set by the adaptive path
        # above) fails the float check and reaches the string comparisons
        # below — confirm how that path is meant to be applied.
        if isinstance(inter_margin, float):
            inter_losses = torch.clamp(inter_margin - inter_dist, min=0.)
            inter_loss = _dmr_aggreg(inter_losses, aggreg_mode=inter_aggreg)
            loss += inter_loss
        elif inter_margin == "gor":
            # Global Orthogonal Regularization style: first and second moments
            # of the cosine similarity recovered from the distances.
            simi = -0.5 * (inter_dist - 2)
            first_moment = torch.mean(simi)
            second_moment = torch.mean(torch.pow(simi, 2))
            inter_loss = torch.pow(first_moment,
                                   2) + torch.clamp(second_moment - 1. / weights.shape[-1], min=0.)
            loss += inter_loss
        elif inter_margin == "simi":
            if square:
                inter_dist = torch.pow(inter_dist, 2)
            simi = torch.abs(-0.5 * (inter_dist - 2))
            inter_loss = _dmr_aggreg(simi, aggreg_mode="adamine")
            loss += inter_loss
        else:
            assert False, inter_margin

    # Neither term contributed: return a zero tensor instead of a float.
    if isinstance(loss, float):
        loss = torch.tensor(0.).to(weights.device)

    return factor * loss
def _index_mask(tensor, mask):
return torch.masked_select(tensor, mask)
@functools.lru_cache(maxsize=64, typed=False)
def _dmr_inter_mask(size, nb_classes, nb_clusters, device):
inter_mask = ~torch.ones(size, size).bool()
lower_tri = torch.tensor(np.tril_indices(size, k=0))
for c in range(nb_classes):
inter_mask[c * nb_clusters:(c + 1) * nb_clusters, (c + 1) * nb_clusters:] = True
inter_mask[lower_tri[0], lower_tri[1]] = False
return inter_mask.to(device)
@functools.lru_cache(maxsize=64, typed=False)
def _dmr_inter_oldvsnew_mask(size, current_index, device):
inter_mask = ~torch.ones(size, size).bool()
lower_tri = torch.tensor(np.tril_indices(size, k=0))
inter_mask[:current_index, current_index:] = True
inter_mask[lower_tri[0], lower_tri[1]] = False
return inter_mask.to(device)
@functools.lru_cache(maxsize=64, typed=False)
def _dmr_intra_mask(size, nb_classes, nb_clusters, device):
intra_mask = ~torch.ones(size, size).bool()
lower_tri = torch.tensor(np.tril_indices(size, k=0))
for c in range(nb_classes):
intra_mask[c * nb_clusters:(c + 1) * nb_clusters, :(c + 1) * nb_clusters] = True
intra_mask[lower_tri[0], lower_tri[1]] = False
return intra_mask.to(device)
def _dmr_weights_distance(weights, square=True):
dist = 2 - 2 * torch.mm(weights, weights.t())
dist = torch.abs(dist) # Absolute is to handle small negatives due to numerical instability
if not square:
dist = torch.sqrt(torch.abs(dist) + 1e-10)
return dist
def _dmr_aggreg(losses, aggreg_mode="mean"):
if aggreg_mode == "mean":
return torch.mean(losses)
elif aggreg_mode == "max":
return torch.max(losses)
elif aggreg_mode == "adamine":
return _adamine(losses)
raise NotImplementedError("Unknown aggreg mode {}.".format(aggreg_mode))
def _adamine(losses):
nb_not_neg = max(len(torch.nonzero(losses)), 1.0)
return losses.sum() / nb_not_neg
def double_margin_constrastive_regularization_features(
    features, targets, intra_margin=0.2, inter_margin=0.8
):
    """Sample positive/negative index pairs over a batch of features.

    NOTE(review): this function appears truncated (no return statement) and
    contains unresolved names — see the inline notes before changing it.
    """
    pos_tuples, neg_tuples = [], []

    np_targets = targets.cpu().numpy()
    # NOTE(review): `range(np_targets)` over an ndarray raises TypeError —
    # presumably `enumerate(np_targets)` was intended. TODO confirm.
    for i, t in enumerate(range(np_targets)):
        indexes_similar = np.where(targets == t)[0]
        indexes_disimilar = np.where(targets != t)[0]

        if len(indexes_similar):
            pos = np.random.choice(indexes_similar)
            # NOTE(review): `pos_tuple` is undefined — likely meant `pos_tuples`.
            pos_tuple.append((i, pos))
        if len(indexes_disimilar):
            neg = np.random.choice(indexes_disimilar)
            # NOTE(review): `neg_tuple` is undefined — likely meant `neg_tuples`.
            neg_tuple.append((i, neg))
| 13,746 | 33.027228 | 99 | py |
caffe-model | caffe-model-master/nin.py | from caffe import layers as L
from caffe import params as P
import caffe
def conv_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution layer followed by an in-place ReLU.

    Returns the (conv, relu) pair so the caller can register both tops.
    """
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0))
    return conv, L.ReLU(conv, in_place=True)
def fc_relu_drop(bottom, fc_num_output=4096, dropout_ratio=0.5):
    """Fully-connected layer topped by an in-place ReLU and Dropout."""
    fc = L.InnerProduct(
        bottom,
        num_output=fc_num_output,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0))
    relu = L.ReLU(fc, in_place=True)
    drop = L.Dropout(fc, in_place=True, dropout_param=dict(dropout_ratio=dropout_ratio))
    return fc, relu, drop
def conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm, Scale and ReLU."""
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0))
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def accuracy_top1_top5(bottom, label):
    """Top-1 and top-5 Accuracy layers, evaluated in the TEST phase only."""
    top1 = L.Accuracy(bottom, label, include=dict(phase=1))
    top5 = L.Accuracy(bottom, label, include=dict(phase=1), accuracy_param=dict(top_k=5))
    return top1, top5
class NIN(object):
    """Network-in-Network prototxt generator (plain and BatchNorm variants).

    References:
        * Network In Network, Lin et al., ICLR 2014.

    :param lmdb_train: Path to the training LMDB database.
    :param lmdb_test: Path to the test LMDB database.
    :param num_output: Number of classes of the final classifier.
    """

    def __init__(self, lmdb_train, lmdb_test, num_output):
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output

    def _data_layer(self, batch_size, phase, crop_size):
        """Data + label layers reading from the phase-appropriate LMDB."""
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        return L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                      transform_param=dict(crop_size=crop_size, mean_value=[104, 117, 123], mirror=mirror))

    def nin_proto(self, batch_size, phase='TRAIN'):
        """Build the plain NIN network specification.

        :param batch_size: Batch size of the data layer.
        :param phase: 'TRAIN' for the training net; anything else builds the
            test net, which also gets top-1/top-5 accuracy layers.
        :return: The generated NetParameter protobuf.
        """
        n = caffe.NetSpec()
        n.data, n.label = self._data_layer(batch_size, phase, crop_size=224)

        n.conv1, n.relu0 = conv_relu(n.data, num_output=96, kernel_size=11, stride=4)  # 96x53x53
        n.cccp1, n.relu1 = conv_relu(n.conv1, num_output=96, kernel_size=1, stride=1)
        n.cccp2, n.relu2 = conv_relu(n.cccp1, num_output=96, kernel_size=1, stride=1)
        n.pool1 = L.Pooling(n.cccp2, pool=P.Pooling.MAX, kernel_size=3, stride=2)  # 96x26x26
        n.conv2, n.relu3 = conv_relu(n.pool1, num_output=256, kernel_size=5, stride=1, pad=2)  # 256x26x26
        n.cccp3, n.relu4 = conv_relu(n.conv2, num_output=256, kernel_size=1, stride=1)
        n.cccp4, n.relu5 = conv_relu(n.cccp3, num_output=256, kernel_size=1, stride=1)
        n.pool2 = L.Pooling(n.cccp4, pool=P.Pooling.MAX, kernel_size=3, stride=2)  # 256x13x13
        n.conv3, n.relu6 = conv_relu(n.pool2, num_output=384, kernel_size=3, stride=1, pad=1)  # 384x13x13
        n.cccp5, n.relu7 = conv_relu(n.conv3, num_output=384, kernel_size=1, stride=1)
        n.cccp6, n.relu8 = conv_relu(n.cccp5, num_output=384, kernel_size=1, stride=1)
        n.pool3 = L.Pooling(n.cccp6, pool=P.Pooling.MAX, kernel_size=3, stride=2)  # 384x6x6
        n.drop7 = L.Dropout(n.pool3, in_place=True, dropout_param=dict(dropout_ratio=0.5))
        n.conv4, n.relu9 = conv_relu(n.pool3, num_output=1024, kernel_size=3, stride=1, pad=1)  # 1024x6x6
        n.cccp7, n.relu10 = conv_relu(n.conv4, num_output=1024, kernel_size=1, stride=1)
        n.cccp8, n.relu11 = conv_relu(n.cccp7, num_output=1024, kernel_size=1, stride=1)
        n.pool4 = L.Pooling(n.cccp8, pool=P.Pooling.AVE, kernel_size=6, stride=1)  # 1024x1x1
        n.classifier = L.InnerProduct(n.pool4, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='gaussian', std=0.01),
                                      bias_filler=dict(type='constant', value=0)
                                      )
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)

        if phase != 'TRAIN':
            # BUG FIX: accuracy must be measured on the class scores
            # (n.classifier, classifier_num units), not on the 1024-channel
            # pooled features n.pool4 that were passed in previously.
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.classifier, n.label)
        return n.to_proto()

    def nin_bn_proto(self, batch_size, phase='TRAIN'):
        """Build the BatchNorm variant of the NIN network.

        NOTE(review): this variant crops at 227 while nin_proto crops at
        224 — confirm the difference is intentional.
        """
        n = caffe.NetSpec()
        n.data, n.label = self._data_layer(batch_size, phase, crop_size=227)

        n.conv1, n.bn0, n.scale0, n.relu0 = conv_bn_scale_relu(n.data, num_output=96, kernel_size=11, stride=4)
        n.cccp1, n.bn1, n.scale1, n.relu1 = conv_bn_scale_relu(n.conv1, num_output=96, kernel_size=1, stride=1)
        n.cccp2, n.bn2, n.scale2, n.relu2 = conv_bn_scale_relu(n.cccp1, num_output=96, kernel_size=1, stride=1)
        n.pool1 = L.Pooling(n.cccp2, pool=P.Pooling.MAX, kernel_size=3, stride=2)  # 96x26x26
        n.conv2, n.bn3, n.scale3, n.relu3 = conv_bn_scale_relu(n.pool1, num_output=256, kernel_size=5, stride=1, pad=2)
        n.cccp3, n.bn4, n.scale4, n.relu4 = conv_bn_scale_relu(n.conv2, num_output=256, kernel_size=1, stride=1)
        n.cccp4, n.bn5, n.scale5, n.relu5 = conv_bn_scale_relu(n.cccp3, num_output=256, kernel_size=1, stride=1)
        n.pool2 = L.Pooling(n.cccp4, pool=P.Pooling.MAX, kernel_size=3, stride=2)  # 256x13x13
        n.conv3, n.bn6, n.scale6, n.relu6 = conv_bn_scale_relu(n.pool2, num_output=384, kernel_size=3, stride=1, pad=1)
        n.cccp5, n.bn7, n.scale7, n.relu7 = conv_bn_scale_relu(n.conv3, num_output=384, kernel_size=1, stride=1)
        n.cccp6, n.bn8, n.scale8, n.relu8 = conv_bn_scale_relu(n.cccp5, num_output=384, kernel_size=1, stride=1)
        n.pool3 = L.Pooling(n.cccp6, pool=P.Pooling.MAX, kernel_size=3, stride=2)  # 384x6x6
        n.drop7 = L.Dropout(n.pool3, in_place=True, dropout_param=dict(dropout_ratio=0.5))
        n.conv4, n.bn9, n.scale9, n.relu9 = conv_bn_scale_relu(n.pool3, num_output=1024, kernel_size=3, stride=1, pad=1)
        n.cccp7, n.bn10, n.scale10, n.relu10 = conv_bn_scale_relu(n.conv4, num_output=1024, kernel_size=1, stride=1)
        n.cccp8, n.bn11, n.scale11, n.relu11 = conv_bn_scale_relu(n.cccp7, num_output=1024, kernel_size=1, stride=1)
        n.pool4 = L.Pooling(n.cccp8, pool=P.Pooling.AVE, kernel_size=6, stride=1)  # 1024x1x1
        n.classifier = L.InnerProduct(n.pool4, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='gaussian', std=0.01),
                                      bias_filler=dict(type='constant', value=0)
                                      )
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)

        if phase != 'TRAIN':
            # BUG FIX: accuracy on the classifier output, not n.pool4.
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.classifier, n.label)
        return n.to_proto()
| 7,811 | 55.201439 | 120 | py |
caffe-model | caffe-model-master/inception_resnet.py | import caffe
from caffe import layers as L
from caffe import params as P
def fc_relu_drop(bottom, num_output=1024, dropout_ratio=0.5):
    """Fully-connected layer topped by an in-place ReLU and Dropout."""
    fc = L.InnerProduct(
        bottom,
        num_output=num_output,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='xavier', std=1),
        bias_filler=dict(type='constant', value=0.2))
    relu = L.ReLU(fc, in_place=True)
    drop = L.Dropout(fc, in_place=True,
                     dropout_param=dict(dropout_ratio=dropout_ratio))
    return fc, relu, drop
def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Square convolution followed by in-place BatchNorm, Scale and ReLU."""
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2))
    conv_bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    conv_scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    conv_relu = L.ReLU(conv, in_place=True)
    return conv, conv_bn, conv_scale, conv_relu
def factorization_conv_bn_scale(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Square convolution with in-place BatchNorm and Scale but *no* ReLU
    (linear activation, used before residual additions)."""
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2))
    conv_bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    conv_scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    return conv, conv_bn, conv_scale
def factorization_conv_mxn(bottom, num_output=64, kernel_h=1, kernel_w=7, stride=1, pad_h=3, pad_w=0):
    """Asymmetric (m x n) convolution with in-place BatchNorm, Scale and ReLU."""
    conv_mxn = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_h=kernel_h,
        kernel_w=kernel_w,
        stride=stride,
        pad_h=pad_h,
        pad_w=pad_w,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2))
    conv_mxn_bn = L.BatchNorm(conv_mxn, use_global_stats=False, in_place=True)
    conv_mxn_scale = L.Scale(conv_mxn, scale_param=dict(bias_term=True), in_place=True)
    conv_mxn_relu = L.ReLU(conv_mxn, in_place=True)
    return conv_mxn, conv_mxn_bn, conv_mxn_scale, conv_mxn_relu
def eltwise_relu(bottom1, bottom2):
    """Element-wise sum of two bottoms followed by an in-place ReLU."""
    summed = L.Eltwise(bottom1, bottom2, eltwise_param=dict(operation=1))
    activated = L.ReLU(summed, in_place=True)
    return summed, activated
def stem_resnet_v2_299x299(bottom):
    """Inception-ResNet-v2 stem: initial convolutions plus a four-branch
    Inception-style mixed block.

    input:3x299x299
    output:320x35x35
    :param bottom: bottom layer
    :return: layers
    """
    # Initial strided/valid convolutions and max-poolings: 3x299x299 -> 192x35x35.
    conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=3, stride=2)  # 32x149x149
    conv2_3x3_s1, conv2_3x3_s1_bn, conv2_3x3_s1_scale, conv2_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv1_3x3_s2, num_output=32, kernel_size=3)  # 32x147x147
    conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv2_3x3_s1, num_output=64, kernel_size=3, pad=1)  # 64x147x147
    pool1_3x3_s2 = L.Pooling(conv3_3x3_s1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x73x73
    conv4_3x3_reduce, conv4_3x3_reduce_bn, conv4_3x3_reduce_scale, conv4_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(pool1_3x3_s2, num_output=80, kernel_size=1)  # 80x73x73
    conv4_3x3, conv4_3x3_bn, conv4_3x3_scale, conv4_3x3_relu = \
        factorization_conv_bn_scale_relu(conv4_3x3_reduce, num_output=192, kernel_size=3)  # 192x71x71
    pool2_3x3_s2 = L.Pooling(conv4_3x3, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 192x35x35

    # Branch 1: plain 1x1 convolution.
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool2_3x3_s2, num_output=96, kernel_size=1)  # 96x35x35
    # Branch 2: 1x1 reduction then 5x5 convolution.
    conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu = \
        factorization_conv_bn_scale_relu(pool2_3x3_s2, num_output=48, kernel_size=1)  # 48x35x35
    conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu = \
        factorization_conv_bn_scale_relu(conv_5x5_reduce, num_output=64, kernel_size=5, pad=2)  # 64x35x35
    # Branch 3: 1x1 reduction then two stacked 3x3 convolutions.
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(pool2_3x3_s2, num_output=64, kernel_size=1)  # 64x35x35
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    # Branch 4: average pooling then 1x1 projection.
    ave_pool = L.Pooling(pool2_3x3_s2, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 192x35x35
    conv_1x1_ave, conv_1x1_ave_bn, conv_1x1_ave_scale, conv_1x1_ave_relu = \
        factorization_conv_bn_scale_relu(ave_pool, num_output=64, kernel_size=1)  # 64x35x35

    concat = L.Concat(conv_1x1, conv_5x5, conv_3x3_2, conv_1x1_ave)  # 320(96+64+96+64)x35x35

    # Every intermediate layer is returned so the caller can register each
    # one on its NetSpec under its own name.
    return conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_relu, conv2_3x3_s1, conv2_3x3_s1_bn, \
        conv2_3x3_s1_scale, conv2_3x3_s1_relu, conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu, \
        pool1_3x3_s2, conv4_3x3_reduce, conv4_3x3_reduce_bn, conv4_3x3_reduce_scale, conv4_3x3_reduce_relu, \
        conv4_3x3, conv4_3x3_bn, conv4_3x3_scale, conv4_3x3_relu, pool2_3x3_s2, conv_1x1, conv_1x1_bn, conv_1x1_scale, \
        conv_1x1_relu, conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu, \
        conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu, conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, \
        conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2, conv_3x3_2_bn, \
        conv_3x3_2_scale, conv_3x3_2_relu, ave_pool, conv_1x1_ave, conv_1x1_ave_bn, conv_1x1_ave_scale, conv_1x1_ave_relu, \
        concat
def inception_resnet_v2_a(bottom):
    """Inception-ResNet-v2 "A" residual block.

    Three parallel branches (1x1, 1x1->3x3, 1x1->3x3->3x3) are concatenated,
    projected back to the input width by a linear 1x1 convolution, and added
    to the shortcut.

    input:320x35x35
    output:320x35x35
    :param bottom: bottom layer
    :return: layers
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35

    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=32, kernel_size=3, pad=1)  # 32x35x35

    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=48, kernel_size=3, pad=1)  # 48x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=64, kernel_size=3, pad=1)  # 64x35x35

    concat = L.Concat(conv_1x1, conv_3x3, conv_3x3_3)  # 128(32+32+64)x35x35
    # Linear projection back to the residual width (no ReLU before the sum).
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=320, kernel_size=1)  # 320x35x35

    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 320x35x35

    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_3x3_reduce, conv_3x3_reduce_bn, \
        conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, \
        conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, \
        conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, \
        conv_3x3_3_relu, concat, conv_up, conv_up_bn, conv_up_scale, residual_eltwise, residual_eltwise_relu
def reduction_resnet_v2_a(bottom):
    """Reduction-A block: halves the spatial resolution (35x35 -> 17x17)
    via two strided convolution branches and a max-pooling branch.

    input:320x35x35
    output:1088x17x17
    :param bottom: bottom layer
    :return: layers
    """
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=3, stride=2)  # 384x17x17

    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=256, kernel_size=3, stride=1, pad=1)  # 256x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=384, kernel_size=3, stride=2)  # 384x17x17

    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 320x17x17

    concat = L.Concat(conv_3x3, conv_3x3_3, pool)  # 1088(320+384+384)x17x17

    return conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
        conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
        conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, pool, concat
def inception_resnet_v2_b(bottom):
    """Inception-ResNet-v2 "B" residual block (asymmetric 1x7/7x1 factorization).

    input:1088x17x17
    output:1088x17x17
    :param bottom: bottom layer
    :return: layers
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17

    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=128, kernel_size=1)  # 128x17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=160, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 160x17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=192, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 192x17x17

    concat = L.Concat(conv_1x1, conv_7x1)  # 384(192+192)x17x17
    # Linear projection back to the residual width (no ReLU before the sum).
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=1088, kernel_size=1)  # 1088x17x17

    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 1088x17x17

    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x7_reduce, conv_1x7_reduce_bn, \
        conv_1x7_reduce_scale, conv_1x7_reduce_relu, conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, \
        conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, concat, conv_up, conv_up_bn, conv_up_scale, \
        residual_eltwise, residual_eltwise_relu
def reduction_resnet_v2_b(bottom):
    """Reduction-B block: halves the spatial resolution (17x17 -> 8x8)
    via three strided convolution branches and a max-pooling branch.

    input:1088x17x17
    output:2080x8x8
    :param bottom: bottom layer
    :return: layers
    """
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=384, kernel_size=3, stride=2)  # 384x8x8

    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=288, kernel_size=3, stride=2)  # 288x8x8

    conv_3x3_3_reduce, conv_3x3_3_reduce_bn, conv_3x3_3_reduce_scale, conv_3x3_3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_3_reduce, num_output=288, kernel_size=3, pad=1)  # 288x17x17
    conv_3x3_4, conv_3x3_4_bn, conv_3x3_4_scale, conv_3x3_4_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_3, num_output=320, kernel_size=3, stride=2)  # 320x8x8

    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 1088x8x8

    concat = L.Concat(conv_3x3, conv_3x3_2, conv_3x3_4, pool)  # 2080(1088+384+288+320)x8x8

    return conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, \
        conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
        conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
        conv_3x3_2_relu, conv_3x3_3_reduce, conv_3x3_3_reduce_bn, conv_3x3_3_reduce_scale, conv_3x3_3_reduce_relu, \
        conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, conv_3x3_4, conv_3x3_4_bn, conv_3x3_4_scale, \
        conv_3x3_4_relu, pool, concat
def inception_resnet_v2_c(bottom):
    """Inception-ResNet-v2 "C" residual block (asymmetric 1x3/3x1 factorization).

    input:2080x8x8
    output:2080x8x8
    :param bottom: bottom layer
    :return: layers
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x8x8

    conv_1x3_reduce, conv_1x3_reduce_bn, conv_1x3_reduce_scale, conv_1x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x8x8
    conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu = \
        factorization_conv_mxn(conv_1x3_reduce, num_output=224, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 224x8x8
    conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu = \
        factorization_conv_mxn(conv_1x3, num_output=256, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 256x8x8

    concat = L.Concat(conv_1x1, conv_3x1)  # 448(192+256)x8x8
    # Linear projection back to the residual width (no ReLU before the sum).
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=2080, kernel_size=1)  # 2080x8x8

    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 2080x8x8

    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x3_reduce, conv_1x3_reduce_bn, \
        conv_1x3_reduce_scale, conv_1x3_reduce_relu, conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu, \
        conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu, concat, conv_up, conv_up_bn, conv_up_scale, \
        residual_eltwise, residual_eltwise_relu
# Source-code templates for registering one inception/residual block on a
# NetSpec. The proto builders replace the placeholder tokens "(order)" (the
# 1-based block number) and "bottom" (the previous layer's attribute path)
# and then exec() the resulting assignment statement. The backslashes are
# consumed by the string literal, so each template exec's as a single line.
string_a = 'n.inception_resnet_v2_a(order)_1x1, n.inception_resnet_v2_a(order)_1x1_bn, n.inception_resnet_v2_a(order)_1x1_scale, \
n.inception_resnet_v2_a(order)_1x1_relu, n.inception_resnet_v2_a(order)_3x3_reduce, n.inception_resnet_v2_a(order)_3x3_reduce_bn, \
n.inception_resnet_v2_a(order)_3x3_reduce_scale, n.inception_resnet_v2_a(order)_3x3_reduce_relu, n.inception_resnet_v2_a(order)_3x3, \
n.inception_resnet_v2_a(order)_3x3_bn, n.inception_resnet_v2_a(order)_3x3_scale, n.inception_resnet_v2_a(order)_3x3_relu, \
n.inception_resnet_v2_a(order)_3x3_2_reduce, n.inception_resnet_v2_a(order)_3x3_2_reduce_bn, n.inception_resnet_v2_a(order)_3x3_2_reduce_scale, \
n.inception_resnet_v2_a(order)_3x3_2_reduce_relu, n.inception_resnet_v2_a(order)_3x3_2, n.inception_resnet_v2_a(order)_3x3_2_bn, \
n.inception_resnet_v2_a(order)_3x3_2_scale, n.inception_resnet_v2_a(order)_3x3_2_relu, n.inception_resnet_v2_a(order)_3x3_3, \
n.inception_resnet_v2_a(order)_3x3_3_bn, n.inception_resnet_v2_a(order)_3x3_3_scale, n.inception_resnet_v2_a(order)_3x3_3_relu, \
n.inception_resnet_v2_a(order)_concat, n.inception_resnet_v2_a(order)_up, n.inception_resnet_v2_a(order)_up_bn, \
n.inception_resnet_v2_a(order)_up_scale, n.inception_resnet_v2_a(order)_residual_eltwise, \
n.inception_resnet_v2_a(order)_residual_eltwise_relu = \
inception_resnet_v2_a(bottom)'
# Same template mechanism for the "B" residual blocks.
string_b = 'n.inception_resnet_v2_b(order)_1x1, n.inception_resnet_v2_b(order)_1x1_bn, n.inception_resnet_v2_b(order)_1x1_scale, \
n.inception_resnet_v2_b(order)_1x1_relu, n.inception_resnet_v2_b(order)_1x7_reduce, n.inception_resnet_v2_b(order)_1x7_reduce_bn, \
n.inception_resnet_v2_b(order)_1x7_reduce_scale, n.inception_resnet_v2_b(order)_1x7_reduce_relu, n.inception_resnet_v2_b(order)_1x7, \
n.inception_resnet_v2_b(order)_1x7_bn, n.inception_resnet_v2_b(order)_1x7_scale, n.inception_resnet_v2_b(order)_1x7_relu, \
n.inception_resnet_v2_b(order)_7x1, n.inception_resnet_v2_b(order)_7x1_bn, n.inception_resnet_v2_b(order)_7x1_scale, \
n.inception_resnet_v2_b(order)_7x1_relu, n.inception_resnet_v2_b(order)_concat, n.inception_resnet_v2_b(order)_up, \
n.inception_resnet_v2_b(order)_up_bn, n.inception_resnet_v2_b(order)_up_scale, n.inception_resnet_v2_b(order)_residual_eltwise, \
n.inception_resnet_v2_b(order)_residual_eltwise_relu \
= inception_resnet_v2_b(bottom)'
# Same template mechanism for the "C" residual blocks.
string_c = 'n.inception_resnet_v2_c(order)_1x1, n.inception_resnet_v2_c(order)_1x1_bn, n.inception_resnet_v2_c(order)_1x1_scale, \
n.inception_resnet_v2_c(order)_1x1_relu, n.inception_resnet_v2_c(order)_1x3_reduce, n.inception_resnet_v2_c(order)_1x3_reduce_bn, \
n.inception_resnet_v2_c(order)_1x3_reduce_scale, n.inception_resnet_v2_c(order)_1x3_reduce_relu, n.inception_resnet_v2_c(order)_1x3, \
n.inception_resnet_v2_c(order)_1x3_bn, n.inception_resnet_v2_c(order)_1x3_scale, n.inception_resnet_v2_c(order)_1x3_relu, \
n.inception_resnet_v2_c(order)_3x1, n.inception_resnet_v2_c(order)_3x1_bn, n.inception_resnet_v2_c(order)_3x1_scale, \
n.inception_resnet_v2_c(order)_3x1_relu, n.inception_resnet_v2_c(order)_concat, n.inception_resnet_v2_c(order)_up, \
n.inception_resnet_v2_c(order)_up_bn, n.inception_resnet_v2_c(order)_up_scale, n.inception_resnet_v2_c(order)_residual_eltwise, \
n.inception_resnet_v2_c(order)_residual_eltwise_relu = \
inception_resnet_v2_c(bottom)'
class InceptionResNet(object):
def __init__(self, lmdb_train, lmdb_test, num_output):
    # LMDB paths consumed by the data layers of the generated protos.
    self.train_data = lmdb_train
    self.test_data = lmdb_test
    # Number of classes of the final classifier layer.
    self.classifier_num = num_output
def inception_resnet_v2_proto(self, batch_size, phase='TRAIN'):
n = caffe.NetSpec()
if phase == 'TRAIN':
source_data = self.train_data
mirror = True
else:
source_data = self.test_data
mirror = False
n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
transform_param=dict(crop_size=299, mean_value=[104, 117, 123], mirror=mirror))
# stem
n.conv1_3x3_s2, n.conv1_3x3_s2_bn, n.conv1_3x3_s2_scale, n.conv1_3x3_relu, n.conv2_3x3_s1, n.conv2_3x3_s1_bn, \
n.conv2_3x3_s1_scale, n.conv2_3x3_relu, n.conv3_3x3_s1, n.conv3_3x3_s1_bn, n.conv3_3x3_s1_scale, n.conv3_3x3_relu, \
n.pool1_3x3_s2, n.conv4_3x3_reduce, n.conv4_3x3_reduce_bn, n.conv4_3x3_reduce_scale, n.conv4_3x3_reduce_relu, \
n.conv4_3x3, n.conv4_3x3_bn, n.conv4_3x3_scale, n.conv4_relu_3x3, n.pool2_3x3_s2, n.conv5_1x1, n.conv5_1x1_bn, n.conv5_1x1_scale, \
n.conv5_1x1_relu, n.conv5_5x5_reduce, n.conv5_5x5_reduce_bn, n.conv5_5x5_reduce_scale, n.conv5_5x5_reduce_relu, \
n.conv5_5x5, n.conv5_5x5_bn, n.conv5_5x5_scale, n.conv5_5x5_relu, n.conv5_3x3_reduce, n.conv5_3x3_reduce_bn, n.conv5_3x3_reduce_scale, \
n.conv5_3x3_reduce_relu, n.conv5_3x3, n.conv5_3x3_bn, n.conv5_3x3_scale, n.conv5_3x3_relu, n.conv5_3x3_2, n.conv5_3x3_2_bn, \
n.conv5_3x3_2_scale, n.conv5_3x3_2_relu, n.ave_pool, n.conv5_1x1_ave, n.conv5_1x1_ave_bn, n.conv5_1x1_ave_scale, n.conv5_1x1_ave_relu, \
n.stem_concat = stem_resnet_v2_299x299(n.data) # 320x35x35
# 10 x inception_resnet_v2_a
for i in xrange(10):
if i == 0:
bottom = 'n.stem_concat'
else:
bottom = 'n.inception_resnet_v2_a(order)_residual_eltwise'.replace('(order)', str(i))
exec (string_a.replace('(order)', str(i + 1)).replace('bottom', bottom)) # 384x35x35
# reduction_resnet_v2_a
n.reduction_a_3x3, n.reduction_a_3x3_bn, n.reduction_a_3x3_scale, n.reduction_a_3x3_relu, \
n.reduction_a_3x3_2_reduce, n.reduction_a_3x3_2_reduce_bn, n.reduction_a_3x3_2_reduce_scale, \
n.reduction_a_3x3_2_reduce_relu, n.reduction_a_3x3_2, n.reduction_a_3x3_2_bn, n.reduction_a_3x3_2_scale, \
n.reduction_a_3x3_2_relu, n.reduction_a_3x3_3, n.reduction_a_3x3_3_bn, n.reduction_a_3x3_3_scale, \
n.reduction_a_3x3_3_relu, n.reduction_a_pool, n.reduction_a_concat = \
reduction_resnet_v2_a(n.inception_resnet_v2_a10_residual_eltwise) # 1088x17x17
# 20 x inception_resnet_v2_b
for i in xrange(20):
if i == 0:
bottom = 'n.reduction_a_concat'
else:
bottom = 'n.inception_resnet_v2_b(order)_residual_eltwise'.replace('(order)', str(i))
exec (string_b.replace('(order)', str(i + 1)).replace('bottom', bottom)) # 1088x17x17
# reduction_resnet_v2_b
n.reduction_b_3x3_reduce, n.reduction_b_3x3_reduce_bn, n.reduction_b_3x3_reduce_scale, \
n.reduction_b_3x3_reduce_relu, n.reduction_b_3x3, n.reduction_b_3x3_bn, n.reduction_b_3x3_scale, \
n.reduction_b_3x3_relu, n.reduction_b_3x3_2_reduce, n.reduction_b_3x3_2_reduce_bn, n.reduction_b_3x3_2_reduce_scale, \
n.reduction_b_3x3_2_reduce_relu, n.reduction_b_3x3_2, n.reduction_b_3x3_2_bn, n.reduction_b_3x3_2_scale, \
n.reduction_b_3x3_2_relu, n.reduction_b_3x3_3_reduce, n.reduction_b_3x3_3_reduce_bn, n.reduction_b_3x3_3_reduce_scale, \
n.reduction_b_3x3_3_reduce_relu, n.reduction_b_3x3_3, n.reduction_b_3x3_3_bn, n.reduction_b_3x3_3_scale, \
n.reduction_b_3x3_3_relu, n.reduction_b_3x3_4, n.reduction_b_3x3_4_bn, n.reduction_b_3x3_4_scale, \
n.reduction_b_3x3_4_relu, n.reduction_b_pool, n.reduction_b_concat = \
reduction_resnet_v2_b(n.inception_resnet_v2_b20_residual_eltwise) # 2080x8x8
# 9 x inception_resnet_v2_c
for i in xrange(9):
if i == 0:
bottom = 'n.reduction_b_concat'
else:
bottom = 'n.inception_resnet_v2_c(order)_residual_eltwise'.replace('(order)', str(i))
exec (string_c.replace('(order)', str(i + 1)).replace('bottom', bottom)) # 2080x8x8
n.inception_resnet_v2_c10_1x1, n.inception_resnet_v2_c10_1x1_bn, n.inception_resnet_v2_c10_1x1_scale, \
n.inception_resnet_v2_c10_1x1_relu = \
factorization_conv_bn_scale_relu(n.inception_resnet_v2_c9_residual_eltwise, num_output=192,
kernel_size=1) # 192x8x8
n.inception_resnet_v2_c10_1x3_reduce, n.inception_resnet_v2_c10_1x3_reduce_bn, \
n.inception_resnet_v2_c10_1x3_reduce_scale, n.inception_resnet_v2_c10_1x3_reduce_relu = \
factorization_conv_bn_scale_relu(n.inception_resnet_v2_c9_residual_eltwise, num_output=192,
kernel_size=1) # 192x8x8
n.inception_resnet_v2_c10_1x3, n.inception_resnet_v2_c10_1x3_bn, n.inception_resnet_v2_c10_1x3_scale, \
n.inception_resnet_v2_c10_1x3_relu = \
factorization_conv_mxn(n.inception_resnet_v2_c10_1x3_reduce, num_output=224, kernel_h=1, kernel_w=3,
pad_h=0, pad_w=1) # 224x8x8
n.inception_resnet_v2_c10_3x1, n.inception_resnet_v2_c10_3x1_bn, n.inception_resnet_v2_c10_3x1_scale, \
n.inception_resnet_v2_c10_3x1_relu = \
factorization_conv_mxn(n.inception_resnet_v2_c10_1x3, num_output=256, kernel_h=3, kernel_w=1, pad_h=1,
pad_w=0) # 256x8x8
n.inception_resnet_v2_c10_concat = L.Concat(n.inception_resnet_v2_c10_1x1,
n.inception_resnet_v2_c10_3x1) # 448(192+256)x8x8
n.inception_resnet_v2_c10_up, n.inception_resnet_v2_c10_up_bn, n.inception_resnet_v2_c10_up_scale = \
factorization_conv_bn_scale(n.inception_resnet_v2_c10_concat, num_output=2080,
kernel_size=1) # 2080x8x8
n.inception_resnet_v2_c10_residual_eltwise = \
L.Eltwise(n.inception_resnet_v2_c9_residual_eltwise, n.inception_resnet_v2_c10_up,
eltwise_param=dict(operation=1)) # 2080x8x8
n.conv6_1x1, n.conv6_1x1_bn, n.conv6_1x1_scale, n.conv6_1x1_relu = \
factorization_conv_bn_scale_relu(n.inception_resnet_v2_c10_residual_eltwise, num_output=1536,
kernel_size=1) # 1536x8x8
n.pool_8x8_s1 = L.Pooling(n.conv6_1x1,
pool=P.Pooling.AVE,
global_pooling=True) # 1536x1x1
n.pool_8x8_s1_drop = L.Dropout(n.pool_8x8_s1, dropout_param=dict(dropout_ratio=0.2))
n.classifier = L.InnerProduct(n.pool_8x8_s1_drop, num_output=self.classifier_num,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant', value=0))
n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
if phase == 'TRAIN':
pass
else:
n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
accuracy_param=dict(top_k=5))
return n.to_proto()
| 26,493 | 61.781991 | 153 | py |
caffe-model | caffe-model-master/resnet.py | import caffe
from caffe import layers as L
from caffe import params as P
def conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm, Scale and ReLU.

    :param bottom: input layer
    :param num_output: number of convolution filters
    :param kernel_size: square kernel size
    :param stride: convolution stride
    :param pad: symmetric padding
    :return: (conv, bn, scale, relu) layers
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom, num_output=num_output, kernel_size=kernel_size, stride=stride, pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0))
    batch_norm = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, batch_norm, scale, relu
def conv_bn_scale(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm and Scale (no activation).

    Used for the last conv of a residual branch, where the ReLU comes after
    the element-wise sum instead.

    :return: (conv, bn, scale) layers
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom, num_output=num_output, kernel_size=kernel_size, stride=stride, pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2))
    batch_norm = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    return conv, batch_norm, scale
def eltwize_relu(bottom1, bottom2):
    """Element-wise SUM (operation=1) of two inputs, then an in-place ReLU.

    :return: (eltwise, relu) layers
    """
    summed = L.Eltwise(bottom1, bottom2, eltwise_param=dict(operation=1))
    activated = L.ReLU(summed, in_place=True)
    return summed, activated
def residual_branch(bottom, base_output=64):
    """
    Bottleneck residual block with an identity shortcut
    (1x1 reduce -> 3x3 -> 1x1 expand, summed with the input).
    input:4*base_output x n x n
    output:4*base_output x n x n
    :param base_output: base num_output of branch2
    :param bottom: bottom layer
    :return: layers
    """
    branch2a, branch2a_bn, branch2a_scale, branch2a_relu = \
        conv_bn_scale_relu(bottom, num_output=base_output, kernel_size=1)  # base_output x n x n
    branch2b, branch2b_bn, branch2b_scale, branch2b_relu = \
        conv_bn_scale_relu(branch2a, num_output=base_output, kernel_size=3, pad=1)  # base_output x n x n
    # No ReLU on branch2c: the activation is applied after the eltwise sum.
    branch2c, branch2c_bn, branch2c_scale = \
        conv_bn_scale(branch2b, num_output=4 * base_output, kernel_size=1)  # 4*base_output x n x n
    residual, residual_relu = \
        eltwize_relu(bottom, branch2c)  # 4*base_output x n x n
    return branch2a, branch2a_bn, branch2a_scale, branch2a_relu, branch2b, branch2b_bn, branch2b_scale, branch2b_relu, \
        branch2c, branch2c_bn, branch2c_scale, residual, residual_relu
def residual_branch_shortcut(bottom, stride=2, base_output=64):
    """
    Bottleneck residual block with a projection shortcut (branch1), used at
    stage transitions where channel count and/or spatial size change.
    :param stride: stride (applied to both branch1 and branch2a)
    :param base_output: base num_output of branch2
    :param bottom: bottom layer
    :return: layers
    """
    # branch1 projects the input to 4*base_output channels to match branch2c.
    branch1, branch1_bn, branch1_scale = \
        conv_bn_scale(bottom, num_output=4 * base_output, kernel_size=1, stride=stride)
    branch2a, branch2a_bn, branch2a_scale, branch2a_relu = \
        conv_bn_scale_relu(bottom, num_output=base_output, kernel_size=1, stride=stride)
    branch2b, branch2b_bn, branch2b_scale, branch2b_relu = \
        conv_bn_scale_relu(branch2a, num_output=base_output, kernel_size=3, pad=1)
    branch2c, branch2c_bn, branch2c_scale = \
        conv_bn_scale(branch2b, num_output=4 * base_output, kernel_size=1)
    residual, residual_relu = \
        eltwize_relu(branch1, branch2c)  # 4*base_output x n x n
    return branch1, branch1_bn, branch1_scale, branch2a, branch2a_bn, branch2a_scale, branch2a_relu, branch2b, \
        branch2b_bn, branch2b_scale, branch2b_relu, branch2c, branch2c_bn, branch2c_scale, residual, residual_relu
branch_shortcut_string = 'n.res(stage)a_branch1, n.res(stage)a_branch1_bn, n.res(stage)a_branch1_scale, \
n.res(stage)a_branch2a, n.res(stage)a_branch2a_bn, n.res(stage)a_branch2a_scale, n.res(stage)a_branch2a_relu, \
n.res(stage)a_branch2b, n.res(stage)a_branch2b_bn, n.res(stage)a_branch2b_scale, n.res(stage)a_branch2b_relu, \
n.res(stage)a_branch2c, n.res(stage)a_branch2c_bn, n.res(stage)a_branch2c_scale, n.res(stage)a, n.res(stage)a_relu = \
residual_branch_shortcut((bottom), stride=(stride), base_output=(num))'
branch_string = 'n.res(stage)b(order)_branch2a, n.res(stage)b(order)_branch2a_bn, n.res(stage)b(order)_branch2a_scale, \
n.res(stage)b(order)_branch2a_relu, n.res(stage)b(order)_branch2b, n.res(stage)b(order)_branch2b_bn, \
n.res(stage)b(order)_branch2b_scale, n.res(stage)b(order)_branch2b_relu, n.res(stage)b(order)_branch2c, \
n.res(stage)b(order)_branch2c_bn, n.res(stage)b(order)_branch2c_scale, n.res(stage)b(order), n.res(stage)b(order)_relu = \
residual_branch((bottom), base_output=(num))'
class ResNet(object):
    """Builds bottleneck ResNet (50/101/152-style) Caffe NetSpecs."""
    def __init__(self, lmdb_train, lmdb_test, num_output):
        """Store LMDB paths and the number of classifier outputs."""
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output
    def resnet_layers_proto(self, batch_size, phase='TRAIN', stages=(3, 4, 6, 3)):
        """
        :param batch_size: the batch_size of train and test phase
        :param phase: TRAIN or TEST
        :param stages: the num of layers = 2 + 3*sum(stages), layers would better be chosen from [50, 101, 152]
                       {every stage is composed of 1 residual_branch_shortcut module and stage[i]-1 residual_branch
                       modules, each module consists of 3 conv layers}
                        (3, 4, 6, 3) for 50 layers; (3, 4, 23, 3) for 101 layers; (3, 8, 36, 3) for 152 layers
        :return: the generated NetParameter (n.to_proto())
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            conv_bn_scale_relu(n.data, num_output=64, kernel_size=7, stride=2, pad=3)  # 64x112x112
        n.pool1 = L.Pooling(n.conv1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x56x56
        # Each stage starts with one projection-shortcut module (i == 0) whose
        # input is the previous stage's last blob, followed by stages[num]-1
        # identity-shortcut modules.  Layer names are generated by exec()ing
        # the string templates defined above.
        for num in xrange(len(stages)):  # num = 0, 1, 2, 3
            for i in xrange(stages[num]):
                if i == 0:
                    stage_string = branch_shortcut_string
                    bottom_string = ['n.pool1', 'n.res2b%s' % str(stages[0] - 1), 'n.res3b%s' % str(stages[1] - 1),
                                     'n.res4b%s' % str(stages[2] - 1)][num]
                else:
                    stage_string = branch_string
                    if i == 1:
                        bottom_string = 'n.res%sa' % str(num + 2)
                    else:
                        bottom_string = 'n.res%sb%s' % (str(num + 2), str(i - 1))
                # stride is 1 for stage 2 (num == 0) and 2 for later stages.
                exec (stage_string.replace('(stage)', str(num + 2)).replace('(bottom)', bottom_string).
                      replace('(num)', str(2 ** num * 64)).replace('(order)', str(i)).
                      replace('(stride)', str(int(num > 0) + 1)))
        exec 'n.pool5 = L.Pooling((bottom), pool=P.Pooling.AVE, global_pooling=True)'.\
            replace('(bottom)', 'n.res5b%s' % str(stages[3] - 1))
        n.classifier = L.InnerProduct(n.pool5, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # include=dict(phase=1) restricts the accuracy layers to TEST phase.
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 8,064 | 50.369427 | 130 | py |
caffe-model | caffe-model-master/resnext.py | import caffe
from caffe import layers as L
from caffe import params as P
def resnext_block(bottom, base_output=64, card=32):
    """
    ResNeXt bottleneck block with an identity shortcut.
    input:4*base_output x n x n
    output:4*base_output x n x n
    :param bottom: bottom layer
    :param base_output: base num_output of branch2
    :param card: cardinality, i.e. number of groups of the 3x3 grouped conv
    :return: layers
    """
    # Python 2 integer division: card / 16 scales the bottleneck width
    # (e.g. card=32 gives width 2*base_output).
    conv1 = L.Convolution(bottom, num_output=base_output * (card / 16), kernel_size=1, stride=1, pad=0, bias_term=False,
                          param=[dict(lr_mult=1, decay_mult=1)], weight_filler=dict(type='xavier'))
    conv1_bn = L.BatchNorm(conv1, use_global_stats=False, in_place=True)
    conv1_scale = L.Scale(conv1, scale_param=dict(bias_term=True), in_place=True)
    conv1_relu = L.ReLU(conv1, in_place=True)
    conv2 = L.Convolution(conv1, num_output=base_output * (card / 16), kernel_size=3, stride=1, pad=1, group=card,
                          bias_term=False, param=[dict(lr_mult=1, decay_mult=1)], weight_filler=dict(type='xavier'))
    conv2_bn = L.BatchNorm(conv2, use_global_stats=False, in_place=True)
    conv2_scale = L.Scale(conv2, scale_param=dict(bias_term=True), in_place=True)
    conv2_relu = L.ReLU(conv2, in_place=True)
    conv3 = L.Convolution(conv2, num_output=base_output * 4, kernel_size=1, stride=1, pad=0, bias_term=False,
                          param=[dict(lr_mult=1, decay_mult=1)], weight_filler=dict(type='xavier'))
    conv3_bn = L.BatchNorm(conv3, use_global_stats=False, in_place=True)
    conv3_scale = L.Scale(conv3, scale_param=dict(bias_term=True), in_place=True)
    # operation=1 is SUM: residual addition followed by in-place ReLU.
    eltwise = L.Eltwise(bottom, conv3, eltwise_param=dict(operation=1))
    eltwise_relu = L.ReLU(eltwise, in_place=True)
    return conv1, conv1_bn, conv1_scale, conv1_relu, conv2, conv2_bn, conv2_scale, conv2_relu, \
        conv3, conv3_bn, conv3_scale, eltwise, eltwise_relu
def match_block(bottom, base_output=64, stride=2, card=32):
    """
    ResNeXt bottleneck block with a projection ("match") shortcut, used at
    stage transitions where channel count and/or spatial size change.
    output:4*base_output x n/stride x n/stride
    :param bottom: bottom layer
    :param base_output: base num_output of branch2
    :param stride: stride applied to both the 3x3 conv and the shortcut
    :param card: cardinality, i.e. number of groups of the 3x3 grouped conv
    :return: layers
    """
    conv1 = L.Convolution(bottom, num_output=base_output * (card / 16), kernel_size=1, stride=1, pad=0, bias_term=False,
                          param=[dict(lr_mult=1, decay_mult=1)], weight_filler=dict(type='xavier'))
    conv1_bn = L.BatchNorm(conv1, use_global_stats=False, in_place=True)
    conv1_scale = L.Scale(conv1, scale_param=dict(bias_term=True), in_place=True)
    conv1_relu = L.ReLU(conv1, in_place=True)
    conv2 = L.Convolution(conv1, num_output=base_output * (card / 16), kernel_size=3, stride=stride, pad=1, group=card,
                          bias_term=False, param=[dict(lr_mult=1, decay_mult=1)], weight_filler=dict(type='xavier'))
    conv2_bn = L.BatchNorm(conv2, use_global_stats=False, in_place=True)
    conv2_scale = L.Scale(conv2, scale_param=dict(bias_term=True), in_place=True)
    conv2_relu = L.ReLU(conv2, in_place=True)
    conv3 = L.Convolution(conv2, num_output=base_output * 4, kernel_size=1, stride=1, pad=0, bias_term=False,
                          param=[dict(lr_mult=1, decay_mult=1)], weight_filler=dict(type='xavier'))
    conv3_bn = L.BatchNorm(conv3, use_global_stats=False, in_place=True)
    conv3_scale = L.Scale(conv3, scale_param=dict(bias_term=True), in_place=True)
    # Shortcut projection so the eltwise sum sees matching channels/size.
    match = L.Convolution(bottom, num_output=base_output * 4, kernel_size=1, stride=stride, pad=0, bias_term=False,
                          param=[dict(lr_mult=1, decay_mult=1)], weight_filler=dict(type='xavier'))
    match_bn = L.BatchNorm(match, use_global_stats=False, in_place=True)
    match_scale = L.Scale(match, scale_param=dict(bias_term=True), in_place=True)
    eltwise = L.Eltwise(match, conv3, eltwise_param=dict(operation=1))
    eltwise_relu = L.ReLU(eltwise, in_place=True)
    return conv1, conv1_bn, conv1_scale, conv1_relu, conv2, conv2_bn, conv2_scale, conv2_relu, \
        conv3, conv3_bn, conv3_scale, match, match_bn, match_scale, eltwise, eltwise_relu
resnext_string = 'n.resx(n)_conv1, n.resx(n)_conv1_bn, n.resx(n)_conv1_scale, n.resx(n)_conv1_relu, \
n.resx(n)_conv2, n.resx(n)_conv2_bn, n.resx(n)_conv2_scale, n.resx(n)_conv2_relu, n.resx(n)_conv3, \
n.resx(n)_conv3_bn, n.resx(n)_conv3_scale, n.resx(n)_elewise, n.resx(n)_elewise_relu = \
resnext_block((bottom), base_output=(base), card=(c))'
match_string = 'n.resx(n)_conv1, n.resx(n)_conv1_bn, n.resx(n)_conv1_scale, n.resx(n)_conv1_relu, \
n.resx(n)_conv2, n.resx(n)_conv2_bn, n.resx(n)_conv2_scale, n.resx(n)_conv2_relu, n.resx(n)_conv3, \
n.resx(n)_conv3_bn, n.resx(n)_conv3_scale, n.resx(n)_match_conv, n.resx(n)_match_conv_bn, n.resx(n)_match_conv_scale,\
n.resx(n)_elewise, n.resx(n)_elewise_relu = match_block((bottom), base_output=(base), stride=(s), card=(c))'
class ResNeXt(object):
    """Builds ResNeXt (grouped-convolution bottleneck) Caffe NetSpecs."""
    def __init__(self, lmdb_train, lmdb_test, num_output):
        """Store LMDB paths and the number of classifier outputs."""
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output
    def resnext_layers_proto(self, batch_size, card=32, phase='TRAIN', stages=(3, 4, 6, 3)):
        """
        :param batch_size: the batch_size of train and test phase
        :param card: cardinality (groups of the 3x3 grouped convolutions)
        :param phase: TRAIN or TEST
        :param stages: the num of layers = 2 + 3*sum(stages), layers would better be chosen from [50, 101, 152]
                       {every stage is composed of 1 residual_branch_shortcut module and stage[i]-1 residual_branch
                       modules, each module consists of 3 conv layers}
                        (3, 4, 6, 3) for 50 layers; (3, 4, 23, 3) for 101 layers; (3, 8, 36, 3) for 152 layers
        :return: the generated NetParameter (n.to_proto())
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1 = L.Convolution(n.data, num_output=64, kernel_size=7, stride=2, pad=3, bias_term=False,
                                param=[dict(lr_mult=1, decay_mult=1)], weight_filler=dict(type='xavier'))
        n.conv1_bn = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True)
        n.conv1_scale = L.Scale(n.conv1, scale_param=dict(bias_term=True), in_place=True)
        n.conv1_relu = L.ReLU(n.conv1, in_place=True)  # 64x112x112
        n.pool1 = L.Pooling(n.conv1, kernel_size=3, stride=2, pad=1, ceil_mode=False, pool=P.Pooling.MAX)  # 64x56x56
        # Modules are numbered globally (resx1, resx2, ...); each stage begins
        # with a match_block (projection shortcut), the rest are resnext_blocks.
        for num in xrange(len(stages)):  # num = 0, 1, 2, 3
            for i in xrange(stages[num]):
                if i == 0:
                    stage_string = match_string
                    bottom_string = ['n.pool1', 'n.resx{}_elewise'.format(str(sum(stages[:1]))),
                                     'n.resx{}_elewise'.format(str(sum(stages[:2]))),
                                     'n.resx{}_elewise'.format(str(sum(stages[:3])))][num]
                else:
                    stage_string = resnext_string
                    bottom_string = 'n.resx{}_elewise'.format(str(sum(stages[:num]) + i))
                # Debug trace of (stage index, module index); Python 2 print statement.
                print num, i
                # stride is 1 for the first stage (num == 0) and 2 afterwards.
                exec (stage_string.replace('(bottom)', bottom_string).
                      replace('(base)', str(2 ** num * 64)).
                      replace('(n)', str(sum(stages[:num]) + i + 1)).
                      replace('(s)', str(int(num > 0) + 1)).
                      replace('(c)', str(card)))
        exec 'n.pool_ave = L.Pooling(n.resx{}_elewise, pool=P.Pooling.AVE, global_pooling=True)'.format(
            str(sum(stages)))
        n.classifier = L.InnerProduct(n.pool_ave, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # include=dict(phase=1) restricts the accuracy layers to TEST phase.
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 8,469 | 53.645161 | 122 | py |
caffe-model | caffe-model-master/inception_v4.py | import caffe
from caffe import layers as L
from caffe import params as P
def fc_relu_drop(bottom, num_output=1024, dropout_ratio=0.5):
    """Fully-connected layer followed by in-place ReLU and Dropout.

    :param bottom: input layer
    :param num_output: InnerProduct output dimension
    :param dropout_ratio: dropout probability
    :return: (fc, relu, dropout) layers
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    inner_product = L.InnerProduct(
        bottom, num_output=num_output,
        param=lr_params,
        weight_filler=dict(type='xavier', std=1),
        bias_filler=dict(type='constant', value=0.2))
    activation = L.ReLU(inner_product, in_place=True)
    dropout = L.Dropout(inner_product, in_place=True,
                        dropout_param=dict(dropout_ratio=dropout_ratio))
    return inner_product, activation, dropout
def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Square-kernel Convolution with in-place BatchNorm, Scale and ReLU.

    :return: (conv, bn, scale, relu) layers
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom, num_output=num_output, kernel_size=kernel_size, stride=stride, pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2))
    batch_norm = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, batch_norm, scale, relu
def factorization_conv_mxn(bottom, num_output=64, kernel_h=1, kernel_w=7, stride=1, pad_h=3, pad_w=0):
    """Asymmetric (m x n) factorized Convolution with in-place BN, Scale, ReLU.

    :param kernel_h, kernel_w: rectangular kernel dimensions
    :param pad_h, pad_w: padding along each axis
    :return: (conv, bn, scale, relu) layers
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom, num_output=num_output, kernel_h=kernel_h, kernel_w=kernel_w, stride=stride,
        pad_h=pad_h, pad_w=pad_w,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2))
    batch_norm = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, batch_norm, scale, relu
def stem_v4_299x299(bottom):
    """
    Inception-v4 stem for 299x299 inputs.
    input:3x299x299
    output:384x35x35
    :param bottom: bottom layer
    :return: layers
    """
    conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_s2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=3, stride=2)  # 32x149x149
    conv2_3x3_s1, conv2_3x3_s1_bn, conv2_3x3_s1_scale, conv2_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv1_3x3_s2, num_output=32, kernel_size=3, stride=1)  # 32x147x147
    conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv2_3x3_s1, num_output=64, kernel_size=3, stride=1, pad=1)  # 64x147x147
    # First split: max-pool branch concatenated with a strided conv branch.
    inception_stem1_pool = L.Pooling(conv3_3x3_s1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x73x73
    inception_stem1_3x3_s2, inception_stem1_3x3_s2_bn, inception_stem1_3x3_s2_scale, inception_stem1_3x3_s2_relu = \
        factorization_conv_bn_scale_relu(conv3_3x3_s1, num_output=96, kernel_size=3, stride=2)  # 96x73x73
    inception_stem1 = L.Concat(inception_stem1_pool, inception_stem1_3x3_s2)  # 160x73x73
    # Second split: a plain 3x3 branch and a 7x1/1x7-factorized branch.
    inception_stem2_3x3_reduce, inception_stem2_3x3_reduce_bn, inception_stem2_3x3_reduce_scale, \
        inception_stem2_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(inception_stem1, num_output=64, kernel_size=1)  # 64x73x73
    inception_stem2_3x3, inception_stem2_3x3_bn, inception_stem2_3x3_scale, inception_stem2_3x3_relu = \
        factorization_conv_bn_scale_relu(inception_stem2_3x3_reduce, num_output=96, kernel_size=3)  # 96x71x71
    inception_stem2_7x1_reduce, inception_stem2_7x1_reduce_bn, inception_stem2_7x1_reduce_scale, \
        inception_stem2_7x1_reduce_relu = \
        factorization_conv_bn_scale_relu(inception_stem1, num_output=64, kernel_size=1)  # 64x73x73
    inception_stem2_7x1, inception_stem2_7x1_bn, inception_stem2_7x1_scale, inception_stem2_7x1_relu = \
        factorization_conv_mxn(inception_stem2_7x1_reduce, num_output=64, kernel_h=7, kernel_w=1, pad_h=3,
                               pad_w=0)  # 64x73x73
    inception_stem2_1x7, inception_stem2_1x7_bn, inception_stem2_1x7_scale, inception_stem2_1x7_relu = \
        factorization_conv_mxn(inception_stem2_7x1, num_output=64, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 64x73x73
    inception_stem2_3x3_2, inception_stem2_3x3_2_bn, inception_stem2_3x3_2_scale, inception_stem2_3x3_2_relu = \
        factorization_conv_bn_scale_relu(inception_stem2_1x7, num_output=96, kernel_size=3)  # 96x71x71
    inception_stem2 = L.Concat(inception_stem2_3x3, inception_stem2_3x3_2)  # 192x71x71
    # Third split; kernel_size defaults to 3 in factorization_conv_bn_scale_relu.
    inception_stem3_3x3_s2, inception_stem3_3x3_s2_bn, inception_stem3_3x3_s2_scale, inception_stem3_3x3_s2_relu = \
        factorization_conv_bn_scale_relu(inception_stem2, num_output=192, stride=2)  # 192x35x35
    inception_stem3_pool = L.Pooling(inception_stem2, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 192x35x35
    inception_stem3 = L.Concat(inception_stem3_3x3_s2, inception_stem3_pool)  # 384x35x35
    return conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_s2_relu, conv2_3x3_s1, conv2_3x3_s1_bn, \
        conv2_3x3_s1_scale, conv2_3x3_s1_relu, conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu, \
        inception_stem1_3x3_s2, inception_stem1_3x3_s2_bn, inception_stem1_3x3_s2_scale, inception_stem1_3x3_s2_relu, \
        inception_stem1_pool, inception_stem1, inception_stem2_3x3_reduce, inception_stem2_3x3_reduce_bn, \
        inception_stem2_3x3_reduce_scale, inception_stem2_3x3_reduce_relu, inception_stem2_3x3, \
        inception_stem2_3x3_bn, inception_stem2_3x3_scale, inception_stem2_3x3_relu, inception_stem2_7x1_reduce, \
        inception_stem2_7x1_reduce_bn, inception_stem2_7x1_reduce_scale, inception_stem2_7x1_reduce_relu, \
        inception_stem2_7x1, inception_stem2_7x1_bn, inception_stem2_7x1_scale, inception_stem2_7x1_relu, \
        inception_stem2_1x7, inception_stem2_1x7_bn, inception_stem2_1x7_scale, inception_stem2_1x7_relu, \
        inception_stem2_3x3_2, inception_stem2_3x3_2_bn, inception_stem2_3x3_2_scale, inception_stem2_3x3_2_relu, \
        inception_stem2, inception_stem3_3x3_s2, inception_stem3_3x3_s2_bn, inception_stem3_3x3_s2_scale, \
        inception_stem3_3x3_s2_relu, inception_stem3_pool, inception_stem3
def inception_v4_a(bottom):
    """
    Inception-v4 'A' module: four parallel branches (avg-pool+1x1, 1x1,
    1x1->3x3, 1x1->3x3->3x3) concatenated along channels.
    input:384x35x35
    output:384x35x35
    :param bottom: bottom layer
    :return: layers
    """
    pool_ave = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 384x35x35
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool_ave, num_output=96, kernel_size=1)  # 96x35x35
    conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, conv_1x1_2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=96, kernel_size=1)  # 96x35x35
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=1)  # 64x35x35
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=1)  # 64x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    concat = L.Concat(conv_1x1, conv_1x1_2, conv_3x3, conv_3x3_3)  # 384(96+96+96+96)x35x35
    return pool_ave, conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, \
        conv_1x1_2_relu, conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, \
        conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, \
        conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, conv_3x3_3, \
        conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, concat
def reduction_v4_a(bottom):
    """
    Inception-v4 'A' reduction module: halves spatial size via three parallel
    branches (max-pool, strided 3x3, 1x1->3x3->strided 3x3), concatenated.
    input:384x35x35
    output:1024x17x17
    :param bottom: bottom layer
    :return: layers
    """
    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 384x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=3, stride=2)  # 384x17x17
    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=224, kernel_size=3, stride=1, pad=1)  # 224x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=256, kernel_size=3, stride=2)  # 256x17x17
    concat = L.Concat(pool, conv_3x3, conv_3x3_3)  # 1024(384+384+256)x17x17
    return pool, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
        conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
        conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, concat
def inception_v4_b(bottom):
    """
    Inception-v4 'B' module: four parallel branches (avg-pool+1x1, 1x1,
    1x7/7x1-factorized, double 1x7/7x1-factorized), concatenated.
    input:1024x17x17
    output:1024x17x17
    :param bottom: bottom layer
    :return: layers
    """
    pool_ave = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 1024x17x17
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool_ave, num_output=128, kernel_size=1)  # 128x17x17
    conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, conv_1x1_2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=1)  # 384x17x17
    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=224, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 224x17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=256, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 256x17x17
    conv_1x7_2_reduce, conv_1x7_2_reduce_bn, conv_1x7_2_reduce_scale, conv_1x7_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_1x7_2, conv_1x7_2_bn, conv_1x7_2_scale, conv_1x7_2_relu = \
        factorization_conv_mxn(conv_1x7_2_reduce, num_output=192, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 192x17x17
    conv_7x1_2, conv_7x1_2_bn, conv_7x1_2_scale, conv_7x1_2_relu = \
        factorization_conv_mxn(conv_1x7_2, num_output=224, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 224x17x17
    conv_1x7_3, conv_1x7_3_bn, conv_1x7_3_scale, conv_1x7_3_relu = \
        factorization_conv_mxn(conv_7x1_2, num_output=224, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 224x17x17
    conv_7x1_3, conv_7x1_3_bn, conv_7x1_3_scale, conv_7x1_3_relu = \
        factorization_conv_mxn(conv_1x7_3, num_output=256, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 256x17x17
    concat = L.Concat(conv_1x1, conv_1x1_2, conv_7x1, conv_7x1_3)  # 1024(128+384+256+256)x17x17
    return pool_ave, conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, \
        conv_1x1_2_relu, conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu, \
        conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, \
        conv_1x7_2_reduce, conv_1x7_2_reduce_bn, conv_1x7_2_reduce_scale, conv_1x7_2_reduce_relu, conv_1x7_2, \
        conv_1x7_2_bn, conv_1x7_2_scale, conv_1x7_2_relu, conv_7x1_2, conv_7x1_2_bn, conv_7x1_2_scale, \
        conv_7x1_2_relu, conv_1x7_3, conv_1x7_3_bn, conv_1x7_3_scale, conv_1x7_3_relu, conv_7x1_3, conv_7x1_3_bn, \
        conv_7x1_3_scale, conv_7x1_3_relu, concat
def reduction_v4_b(bottom):
    """Inception-v4 reduction-B module: halves the 17x17 grid to 8x8.

    Three parallel branches (max-pool, 1x1->3x3/s2 conv, and a
    1x1 -> 1x7 -> 7x1 -> 3x3/s2 chain) are concatenated channel-wise.
    input:1024x17x17
    output:1536x8x8
    :param bottom: bottom layer
    :return: layers (every created layer, in creation order, ending with the concat)
    """
    # Branch 1: parameter-free max pooling, keeps all 1024 input channels.
    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 1024x8x8
    # Branch 2: 1x1 channel reduction, then a stride-2 3x3 convolution.
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=192, kernel_size=3, stride=2)  # 192x8x8
    # Branch 3: factorized 7x7 (1x7 then 7x1), then a stride-2 3x3 convolution.
    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=256, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 256x17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=320, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 320x17x17
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_7x1, num_output=320, kernel_size=3, stride=2)  # 320x8x8
    # Channel-wise concatenation of the three branch outputs.
    concat = L.Concat(pool, conv_3x3, conv_3x3_2)  # 1536(1024+192+320)x8x8
    # NOTE: callers unpack this tuple positionally — keep the order stable.
    return pool, conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, \
        conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, \
        conv_1x7_reduce_relu, conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, conv_7x1, conv_7x1_bn, \
        conv_7x1_scale, conv_7x1_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, concat
def inception_v4_c(bottom):
    """Inception-v4 'C' module on the 8x8 grid (channel count preserved).

    Four parallel branches: avg-pool + 1x1, plain 1x1, a 1x1 that forks into
    parallel 1x3/3x1 convs, and a 1x1 -> 1x3 -> 3x1 chain that forks into a
    second 1x3/3x1 pair.  All branch outputs are concatenated channel-wise.
    input:1536x8x8
    output:1536x8x8
    :param bottom: bottom layer
    :return: layers (every created layer, in creation order, ending with the concat)
    """
    # Branch 1: average pooling followed by a 1x1 projection.
    pool_ave = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 1536x8x8
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool_ave, num_output=256, kernel_size=1)  # 256x8x8
    # Branch 2: plain 1x1 convolution.
    conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, conv_1x1_2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x8x8
    # Branch 3: 1x1 reduction, then the branch forks — both conv_1x3 and
    # conv_3x1 read conv_1x1_3.
    conv_1x1_3, conv_1x1_3_bn, conv_1x1_3_scale, conv_1x1_3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=1)  # 384x8x8
    conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu = \
        factorization_conv_mxn(conv_1x1_3, num_output=256, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 256x8x8
    conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu = \
        factorization_conv_mxn(conv_1x1_3, num_output=256, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 256x8x8
    # Branch 4: 1x1 -> 1x3 -> 3x1 chain; the chain output (conv_3x1_2) then
    # forks into parallel 1x3 and 3x1 convolutions.
    conv_1x1_4, conv_1x1_4_bn, conv_1x1_4_scale, conv_1x1_4_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=1)  # 384x8x8
    conv_1x3_2, conv_1x3_2_bn, conv_1x3_2_scale, conv_1x3_2_relu = \
        factorization_conv_mxn(conv_1x1_4, num_output=448, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 448x8x8
    conv_3x1_2, conv_3x1_2_bn, conv_3x1_2_scale, conv_3x1_2_relu = \
        factorization_conv_mxn(conv_1x3_2, num_output=512, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 512x8x8
    conv_1x3_3, conv_1x3_3_bn, conv_1x3_3_scale, conv_1x3_3_relu = \
        factorization_conv_mxn(conv_3x1_2, num_output=256, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 256x8x8
    conv_3x1_3, conv_3x1_3_bn, conv_3x1_3_scale, conv_3x1_3_relu = \
        factorization_conv_mxn(conv_3x1_2, num_output=256, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 256x8x8
    concat = L.Concat(conv_1x1, conv_1x1_2, conv_1x3, conv_3x1, conv_1x3_3,
                      conv_3x1_3)  # 1536(256+256+256+256+256+256)x8x8
    # NOTE: callers unpack this tuple positionally — keep the order stable.
    return pool_ave, conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, \
        conv_1x1_2_relu, conv_1x1_3, conv_1x1_3_bn, conv_1x1_3_scale, conv_1x1_3_relu, conv_1x3, conv_1x3_bn, \
        conv_1x3_scale, conv_1x3_relu, conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu, conv_1x1_4, \
        conv_1x1_4_bn, conv_1x1_4_scale, conv_1x1_4_relu, conv_1x3_2, conv_1x3_2_bn, conv_1x3_2_scale, \
        conv_1x3_2_relu, conv_3x1_2, conv_3x1_2_bn, conv_3x1_2_scale, conv_3x1_2_relu, conv_1x3_3, conv_1x3_3_bn, \
        conv_1x3_3_scale, conv_1x3_3_relu, conv_3x1_3, conv_3x1_3_bn, conv_3x1_3_scale, conv_3x1_3_relu, concat
string_a = 'n.inception_a(order)_pool_ave, n.inception_a(order)_1x1, n.inception_a(order)_1x1_bn, n.inception_a(order)_1x1_scale, \
n.inception_a(order)_1x1_relu, n.inception_a(order)_1x1_2, n.inception_a(order)_1x1_2_bn, n.inception_a(order)_1x1_2_scale, \
n.inception_a(order)_1x1_2_relu, n.inception_a(order)_3x3_reduce, n.inception_a(order)_3x3_reduce_bn, \
n.inception_a(order)_3x3_reduce_scale, n.inception_a(order)_3x3_reduce_relu, n.inception_a(order)_3x3, \
n.inception_a(order)_3x3_bn, n.inception_a(order)_3x3_scale, n.inception_a(order)_3x3_relu, n.inception_a(order)_3x3_2_reduce, \
n.inception_a(order)_3x3_2_reduce_bn, n.inception_a(order)_3x3_2_reduce_scale, n.inception_a(order)_3x3_2_reduce_relu, \
n.inception_a(order)_3x3_2, n.inception_a(order)_3x3_2_bn, n.inception_a(order)_3x3_2_scale, n.inception_a(order)_3x3_2_relu, \
n.inception_a(order)_3x3_3, n.inception_a(order)_3x3_3_bn, n.inception_a(order)_3x3_3_scale, n.inception_a(order)_3x3_3_relu, \
n.inception_a(order)_concat = \
inception_v4_a(bottom)'
string_b = 'n.inception_b(order)_pool_ave, n.inception_b(order)_1x1, n.inception_b(order)_1x1_bn, n.inception_b(order)_1x1_scale, \
n.inception_b(order)_1x1_relu, n.inception_b(order)_1x1_2, n.inception_b(order)_1x1_2_bn, n.inception_b(order)_1x1_2_scale, \
n.inception_b(order)_1x1_2_relu, n.inception_b(order)_1x7_reduce, n.inception_b(order)_1x7_reduce_bn, n.inception_b(order)_1x7_reduce_scale, \
n.inception_b(order)_1x7_reduce_relu, n.inception_b(order)_1x7, n.inception_b(order)_1x7_bn, n.inception_b(order)_1x7_scale, \
n.inception_b(order)_1x7_relu, n.inception_b(order)_7x1, n.inception_b(order)_7x1_bn, n.inception_b(order)_7x1_scale, n.inception_b(order)_7x1_relu, \
n.inception_b(order)_1x7_2_reduce, n.inception_b(order)_1x7_2_reduce_bn, n.inception_b(order)_1x7_2_reduce_scale, \
n.inception_b(order)_1x7_2_reduce_relu, n.inception_b(order)_1x7_2, n.inception_b(order)_1x7_2_bn, n.inception_b(order)_1x7_2_scale,\
n.inception_b(order)_1x7_2_relu, n.inception_b(order)_7x1_2, n.inception_b(order)_7x1_2_bn, n.inception_b(order)_7x1_2_scale, \
n.inception_b(order)_7x1_2_relu, n.inception_b(order)_1x7_3, n.inception_b(order)_1x7_3_bn, n.inception_b(order)_1x7_3_scale, \
n.inception_b(order)_1x7_3_relu, n.inception_b(order)_7x1_3, n.inception_b(order)_7x1_3_bn, n.inception_b(order)_7x1_3_scale, \
n.inception_b(order)_7x1_3_relu, n.inception_b(order)_concat = \
inception_v4_b(bottom)'
string_c = 'n.inception_c(order)_pool_ave, n.inception_c(order)_1x1, n.inception_c(order)_1x1_bn, n.inception_c(order)_1x1_scale, \
n.inception_c(order)_1x1_relu, n.inception_c(order)_1x1_2, n.inception_c(order)_1x1_2_bn, n.inception_c(order)_1x1_2_scale, \
n.inception_c(order)_1x1_2_relu, n.inception_c(order)_1x1_3, n.inception_c(order)_1x1_3_bn, n.inception_c(order)_1x1_3_scale, \
n.inception_c(order)_1x1_3_relu, n.inception_c(order)_1x3, n.inception_c(order)_1x3_bn, n.inception_c(order)_1x3_scale, \
n.inception_c(order)_1x3_relu, n.inception_c(order)_3x1, n.inception_c(order)_3x1_bn, n.inception_c(order)_3x1_scale, \
n.inception_c(order)_3x1_relu, n.inception_c(order)_1x1_4, n.inception_c(order)_1x1_4_bn, n.inception_c(order)_1x1_4_scale, \
n.inception_c(order)_1x1_4_relu, n.inception_c(order)_1x3_2, n.inception_c(order)_1x3_2_bn, n.inception_c(order)_1x3_2_scale, \
n.inception_c(order)_1x3_2_relu, n.inception_c(order)_3x1_2, n.inception_c(order)_3x1_2_bn, n.inception_c(order)_3x1_2_scale, \
n.inception_c(order)_3x1_2_relu, n.inception_c(order)_1x3_3, n.inception_c(order)_1x3_3_bn, n.inception_c(order)_1x3_3_scale, \
n.inception_c(order)_1x3_3_relu, n.inception_c(order)_3x1_3, n.inception_c(order)_3x1_3_bn, n.inception_c(order)_3x1_3_scale, \
n.inception_c(order)_3x1_3_relu, n.inception_c(order)_concat = \
inception_v4_c(bottom)'
class InceptionV4(object):
    """Builder that emits the Inception-v4 network as a caffe NetParameter.

    Topology assembled in inception_v4_proto:
    stem -> 4 x inception-A -> reduction-A -> 7 x inception-B ->
    reduction-B -> 3 x inception-C -> global avg-pool -> dropout -> classifier.
    """
    def __init__(self, lmdb_train, lmdb_test, num_output):
        # LMDB paths consumed by the Data layer in TRAIN / test phase.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        # Number of classes for the final InnerProduct classifier.
        self.classifier_num = num_output
    def inception_v4_proto(self, batch_size, phase='TRAIN'):
        """Build the full network graph and return its prototxt message.

        :param batch_size: batch size of the Data layer
        :param phase: 'TRAIN' enables mirroring and emits only the loss;
            any other value builds the test net with top-1/top-5 Accuracy
        :return: the NetParameter produced by caffe.NetSpec.to_proto()
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=299, mean_value=[104, 117, 123], mirror=mirror))
        # stem
        n.conv1_3x3_s2, n.conv1_3x3_s2_bn, n.conv1_3x3_s2_scale, n.conv1_3x3_s2_relu, n.conv2_3x3_s1, n.conv2_3x3_s1_bn, \
        n.conv2_3x3_s1_scale, n.conv2_3x3_s1_relu, n.conv3_3x3_s1, n.conv3_3x3_s1_bn, n.conv3_3x3_s1_scale, n.conv3_3x3_s1_relu, \
        n.inception_stem1_3x3_s2, n.inception_stem1_3x3_s2_bn, n.inception_stem1_3x3_s2_scale, n.inception_stem1_3x3_s2_relu, \
        n.inception_stem1_pool, n.inception_stem1, n.inception_stem2_3x3_reduce, n.inception_stem2_3x3_reduce_bn, \
        n.inception_stem2_3x3_reduce_scale, n.inception_stem2_3x3_reduce_relu, n.inception_stem2_3x3, \
        n.inception_stem2_3x3_bn, n.inception_stem2_3x3_scale, n.inception_stem2_3x3_relu, n.inception_stem2_7x1_reduce, \
        n.inception_stem2_7x1_reduce_bn, n.inception_stem2_7x1_reduce_scale, n.inception_stem2_7x1_reduce_relu, \
        n.inception_stem2_7x1, n.inception_stem2_7x1_bn, n.inception_stem2_7x1_scale, n.inception_stem2_7x1_relu, \
        n.inception_stem2_1x7, n.inception_stem2_1x7_bn, n.inception_stem2_1x7_scale, n.inception_stem2_1x7_relu, \
        n.inception_stem2_3x3_2, n.inception_stem2_3x3_2_bn, n.inception_stem2_3x3_2_scale, n.inception_stem2_3x3_2_relu, \
        n.inception_stem2, n.inception_stem3_3x3_s2, n.inception_stem3_3x3_s2_bn, n.inception_stem3_3x3_s2_scale, \
        n.inception_stem3_3x3_s2_relu, n.inception_stem3_pool, n.inception_stem3 = \
            stem_v4_299x299(n.data)  # 384x35x35
        # 4 x inception_a
        # Each iteration substitutes the '(order)'/'bottom' placeholders in the
        # module-level template and exec()s the resulting unpacking statement
        # (Python 2 idiom: xrange / exec statement).
        for i in xrange(4):
            if i == 0:
                bottom = 'n.inception_stem3'
            else:
                # Chain onto the previous module's concat output.
                bottom = 'n.inception_a(order)_concat'.replace('(order)', str(i))
            exec (string_a.replace('(order)', str(i + 1)).replace('bottom', bottom))  # 384x35x35
        # reduction_v4_a
        n.reduction_a_pool, n.reduction_a_3x3, n.reduction_a_3x3_bn, n.reduction_a_3x3_scale, n.reduction_a_3x3_relu, \
        n.reduction_a_3x3_2_reduce, n.reduction_a_3x3_2_reduce_bn, n.reduction_a_3x3_2_reduce_scale, \
        n.reduction_a_3x3_2_reduce_relu, n.reduction_a_3x3_2, n.reduction_a_3x3_2_bn, n.reduction_a_3x3_2_scale, \
        n.reduction_a_3x3_2_relu, n.reduction_a_3x3_3, n.reduction_a_3x3_3_bn, n.reduction_a_3x3_3_scale, \
        n.reduction_a_3x3_3_relu, n.reduction_a_concat = \
            reduction_v4_a(n.inception_a4_concat)  # 1024x17x17
        # 7 x inception_b
        for i in xrange(7):
            if i == 0:
                bottom = 'n.reduction_a_concat'
            else:
                bottom = 'n.inception_b(order)_concat'.replace('(order)', str(i))
            exec (string_b.replace('(order)', str(i + 1)).replace('bottom', bottom))  # 1024x17x17
        # reduction_v4_b
        n.reduction_b_pool, n.reduction_b_3x3_reduce, n.reduction_b_3x3_reduce_bn, n.reduction_b_3x3_reduce_scale, \
        n.reduction_b_3x3_reduce_relu, n.reduction_b_3x3, n.reduction_b_3x3_bn, n.reduction_b_3x3_scale, n.reduction_b_3x3_relu, \
        n.reduction_b_1x7_reduce, n.reduction_b_1x7_reduce_bn, n.reduction_b_1x7_reduce_scale, n.reduction_b_1x7_reduce_relu, \
        n.reduction_b_1x7, n.reduction_b_1x7_bn, n.reduction_b_1x7_scale, n.reduction_b_1x7_relu, n.reduction_b_7x1, n.reduction_b_7x1_bn, \
        n.reduction_b_7x1_scale, n.reduction_b_7x1_relu, n.reduction_b_3x3_2, n.reduction_b_3x3_2_bn, n.reduction_b_3x3_2_scale, \
        n.reduction_b_3x3_2_relu, n.reduction_b_concat = \
            reduction_v4_b(n.inception_b7_concat)  # 1536x8x8
        # 3 x inception_c
        for i in xrange(3):
            if i == 0:
                bottom = 'n.reduction_b_concat'
            else:
                bottom = 'n.inception_c(order)_concat'.replace('(order)', str(i))
            exec (string_c.replace('(order)', str(i + 1)).replace('bottom', bottom))  # 1536x8x8
        # Head: global average pool -> dropout -> linear classifier.
        n.pool_8x8_s1 = L.Pooling(n.inception_c3_concat, pool=P.Pooling.AVE, global_pooling=True)  # 1536x1x1
        n.pool_8x8_s1_drop = L.Dropout(n.pool_8x8_s1, dropout_param=dict(dropout_ratio=0.2))
        n.classifier = L.InnerProduct(n.pool_8x8_s1_drop, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # Accuracy layers only exist in the test net (include phase=TEST).
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 26,845 | 65.947631 | 159 | py |
caffe-model | caffe-model-master/inception_v3.py | import caffe
from caffe import layers as L
from caffe import params as P
def fc_relu_drop(bottom, num_output=1024, dropout_ratio=0.5):
    """Fully-connected layer with in-place ReLU and Dropout on top.

    :param bottom: input blob
    :param num_output: number of units in the InnerProduct layer
    :param dropout_ratio: dropout probability
    :return: (fc, relu, drop) layer handles
    """
    lr_decay_mults = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    fc = L.InnerProduct(
        bottom,
        num_output=num_output,
        param=lr_decay_mults,
        weight_filler=dict(type='xavier', std=1),
        bias_filler=dict(type='constant', value=0.2))
    # ReLU and Dropout both operate in place on the fc blob.
    relu = L.ReLU(fc, in_place=True)
    drop = L.Dropout(fc, in_place=True, dropout_param=dict(dropout_ratio=dropout_ratio))
    return fc, relu, drop
def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Square convolution followed by in-place BatchNorm, Scale and ReLU.

    :param bottom: input blob
    :param num_output: number of output channels
    :param kernel_size: square kernel side
    :param stride: convolution stride
    :param pad: symmetric spatial padding
    :return: (conv, conv_bn, conv_scale, conv_relu) layer handles
    """
    mult_spec = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=mult_spec,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0))
    # Normalization and activation all run in place on the conv blob.
    conv_bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    conv_scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    conv_relu = L.ReLU(conv, in_place=True)
    return conv, conv_bn, conv_scale, conv_relu
def factorization_conv_mxn(bottom, num_output=64, kernel_h=1, kernel_w=7, stride=1, pad_h=3, pad_w=0):
    """Asymmetric (m x n) convolution followed by in-place BatchNorm, Scale and ReLU.

    Used for the factorized 1x7 / 7x1 / 1x3 / 3x1 convolutions of the
    inception modules.

    :param bottom: input blob
    :param num_output: number of output channels
    :param kernel_h: kernel height
    :param kernel_w: kernel width
    :param stride: convolution stride
    :param pad_h: vertical padding
    :param pad_w: horizontal padding
    :return: (conv_mxn, conv_mxn_bn, conv_mxn_scale, conv_mxn_relu) layer handles
    """
    mult_spec = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv_mxn = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_h=kernel_h,
        kernel_w=kernel_w,
        stride=stride,
        pad_h=pad_h,
        pad_w=pad_w,
        param=mult_spec,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2))
    # Normalization and activation all run in place on the conv blob.
    conv_mxn_bn = L.BatchNorm(conv_mxn, use_global_stats=False, in_place=True)
    conv_mxn_scale = L.Scale(conv_mxn, scale_param=dict(bias_term=True), in_place=True)
    conv_mxn_relu = L.ReLU(conv_mxn, in_place=True)
    return conv_mxn, conv_mxn_bn, conv_mxn_scale, conv_mxn_relu
def stem_v3_299x299(bottom):
    """Inception-v3 stem: initial conv/pool stack for 299x299 inputs.

    input:3x299x299
    output:192x35x35
    :param bottom: bottom layer
    :return: layers (every created layer, in creation order, ending with pool2)
    """
    # Three initial 3x3 convolutions; the first downsamples with stride 2.
    conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=3, stride=2)  # 32x149x149
    conv2_3x3_s1, conv2_3x3_s1_bn, conv2_3x3_s1_scale, conv2_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv1_3x3_s2, num_output=32, kernel_size=3)  # 32x147x147
    conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv2_3x3_s1, num_output=64, kernel_size=3, pad=1)  # 64x147x147
    pool1_3x3_s2 = L.Pooling(conv3_3x3_s1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x73x73
    # 1x1 reduction then 3x3 expansion before the second max pool.
    conv4_3x3_reduce, conv4_3x3_reduce_bn, conv4_3x3_reduce_scale, conv4_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(pool1_3x3_s2, num_output=80, kernel_size=1)  # 80x73x73
    conv4_3x3, conv4_3x3_bn, conv4_3x3_scale, conv4_3x3_relu = \
        factorization_conv_bn_scale_relu(conv4_3x3_reduce, num_output=192, kernel_size=3)  # 192x71x71
    pool2_3x3_s2 = L.Pooling(conv4_3x3, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 192x35x35
    # NOTE: callers unpack this tuple positionally — keep the order stable.
    return conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_relu, conv2_3x3_s1, conv2_3x3_s1_bn, \
        conv2_3x3_s1_scale, conv2_3x3_s1_relu, conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu, \
        pool1_3x3_s2, conv4_3x3_reduce, conv4_3x3_reduce_bn, conv4_3x3_reduce_scale, conv4_3x3_reduce_relu, \
        conv4_3x3, conv4_3x3_bn, conv4_3x3_scale, conv4_3x3_relu, pool2_3x3_s2
def inception_v3_a(bottom, pool_proj_num_output=32):
    """Inception-v3 'A' module on the 35x35 grid.

    Four parallel branches (1x1, 1x1->5x5, 1x1->3x3->3x3, avg-pool->1x1)
    concatenated channel-wise; the pool-projection width is configurable so
    the module can emit 256 or 288 channels.
    input:192or256or288x35x35
    output:256or288x35x35
    :param pool_proj_num_output: num_output of pool_proj
    :param bottom: bottom layer
    :return: layers (every created layer, in creation order, ending with the concat)
    """
    # Branch 1: plain 1x1 convolution.
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=1)  # 64x35x35
    # Branch 2: 1x1 reduction then 5x5 convolution.
    conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=48, kernel_size=1)  # 48x35x35
    conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu = \
        factorization_conv_bn_scale_relu(conv_5x5_reduce, num_output=64, kernel_size=5, pad=2)  # 64x35x35
    # Branch 3: 1x1 reduction then two stacked 3x3 convolutions.
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, kernel_size=1, num_output=64)  # 64x35x35
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, kernel_size=3, num_output=96, pad=1)  # 96x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3, kernel_size=3, num_output=96, pad=1)  # 96x35x35
    # Branch 4: average pooling followed by a 1x1 projection.
    pool = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 192x35x35
    pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu = \
        factorization_conv_bn_scale_relu(pool, kernel_size=1, num_output=pool_proj_num_output)  # 32x35x35
    concat = L.Concat(conv_1x1, conv_5x5, conv_3x3_2, pool_proj)  # 256or288(64+64+96+32or64)x35x35
    # NOTE: callers unpack this tuple positionally — keep the order stable.
    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_5x5_reduce, conv_5x5_reduce_bn, \
        conv_5x5_reduce_scale, conv_5x5_reduce_relu, conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu, \
        conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, \
        conv_3x3_scale, conv_3x3_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, pool, \
        pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu, concat
def reduction_v3_a(bottom):
    """Inception-v3 reduction-A module: halves the 35x35 grid to 17x17.

    Three parallel branches (max-pool, 3x3 stride-2 conv, and a
    1x1 -> 3x3 -> 3x3/s2 chain) are concatenated channel-wise.
    input:288x35x35
    output:768x17x17
    :param bottom: bottom layer
    :return: layers (every created layer, in creation order, ending with the concat)
    """
    # Branch 1: parameter-free max pooling, keeps all 288 input channels.
    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 288x17x17
    # Branch 2: single stride-2 3x3 convolution.
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, kernel_size=3, num_output=384, stride=2)  # 384x17x17
    # Branch 3: 1x1 reduction, 3x3, then stride-2 3x3.
    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=1)  # 64x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=96, kernel_size=3, stride=2)  # 96x17x17
    concat = L.Concat(pool, conv_3x3, conv_3x3_3)  # 768(288+384+96)x17x17
    # NOTE: callers unpack this tuple positionally — keep the order stable.
    return pool, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
        conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
        conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, concat
def inception_v3_b(bottom, outs=128):
    """Inception-v3 'B' module on the 17x17 grid (factorized 7x7 convolutions).

    Four parallel branches (avg-pool->1x1, plain 1x1, 1x1->1x7->7x1, and
    1x1->7x1->1x7->7x1->1x7) concatenated channel-wise; 'outs' sets the width
    of the intermediate factorized convolutions (128, 160 or 192 in the paper).
    input:768x17x17
    output:768x17x17
    :param outs: num_outputs
    :param bottom: bottom layer
    :return: layers (every created layer, in creation order, ending with the concat)
    """
    # Branch 1: average pooling followed by a 1x1 projection.
    pool_ave = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 768x17x17
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool_ave, num_output=192, kernel_size=1)  # 192x17x17
    # Branch 2: plain 1x1 convolution.
    conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, conv_1x1_2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    # Branch 3: 1x1 reduction, then factorized 7x7 (1x7 followed by 7x1).
    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=outs, kernel_size=1)  # outsx17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=outs, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # outsx17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=192, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 192x17x17
    # Branch 4: 1x1 reduction, then a double factorized 7x7 chain
    # (7x1 -> 1x7 -> 7x1 -> 1x7).
    conv_7x1_reduce, conv_7x1_reduce_bn, conv_7x1_reduce_scale, conv_7x1_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=outs, kernel_size=1)  # outsx17x17
    conv_7x1_2, conv_7x1_2_bn, conv_7x1_2_scale, conv_7x1_2_relu = \
        factorization_conv_mxn(conv_7x1_reduce, num_output=outs, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # outsx17x17
    conv_1x7_2, conv_1x7_2_bn, conv_1x7_2_scale, conv_1x7_2_relu = \
        factorization_conv_mxn(conv_7x1_2, num_output=outs, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # outsx17x17
    conv_7x1_3, conv_7x1_3_bn, conv_7x1_3_scale, conv_7x1_3_relu = \
        factorization_conv_mxn(conv_1x7_2, num_output=outs, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # outsx17x17
    conv_1x7_3, conv_1x7_3_bn, conv_1x7_3_scale, conv_1x7_3_relu = \
        factorization_conv_mxn(conv_7x1_3, num_output=192, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 192x17x17
    concat = L.Concat(conv_1x1_2, conv_7x1, conv_1x7_3, conv_1x1)  # 768(192+192+192+192)x17x17
    # NOTE: callers unpack this tuple positionally — keep the order stable.
    return pool_ave, conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, \
        conv_1x1_2_relu, conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu, \
        conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, \
        conv_7x1_reduce, conv_7x1_reduce_bn, conv_7x1_reduce_scale, conv_7x1_reduce_relu, conv_7x1_2, conv_7x1_2_bn, \
        conv_7x1_2_scale, conv_7x1_2_relu, conv_1x7_2, conv_1x7_2_bn, conv_1x7_2_scale, conv_1x7_2_relu, conv_7x1_3, \
        conv_7x1_3_bn, conv_7x1_3_scale, conv_7x1_3_relu, conv_1x7_3, conv_1x7_3_bn, conv_1x7_3_scale, conv_1x7_3_relu, \
        concat
def reduction_v3_b(bottom):
    """Inception-v3 reduction-B module: halves the 17x17 grid to 8x8.

    Three parallel branches (max-pool, 1x1->3x3/s2 conv, and a
    1x1 -> 1x7 -> 7x1 -> 3x3/s2 chain) are concatenated channel-wise.
    input:768x17x17
    output:1280x8x8
    :param bottom: bottom layer
    :return: layers (every created layer, in creation order, ending with the concat)
    """
    # Branch 1: parameter-free max pooling, keeps all 768 input channels.
    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 768x8x8
    # Branch 2: 1x1 reduction, then a stride-2 3x3 convolution.
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=320, kernel_size=3, stride=2)  # 320x8x8
    # Branch 3: factorized 7x7 (1x7 then 7x1), then a stride-2 3x3 convolution.
    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=192, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 192x17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=192, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 192x17x17
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_7x1, num_output=192, kernel_size=3, stride=2)  # 192x8x8
    concat = L.Concat(pool, conv_3x3, conv_3x3_2)  # 1280(768+320+192)x8x8
    # NOTE: callers unpack this tuple positionally — keep the order stable.
    return pool, conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, \
        conv_3x3_scale, conv_3x3_relu, conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu, \
        conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, \
        conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, concat
def inception_v3_c(bottom, pool=P.Pooling.AVE):
    """Inception-v3 'C' module on the 8x8 grid (expanded filter bank).

    Four parallel branches: pool + 1x1, plain 1x1, a 1x1 that forks into
    parallel 1x3/3x1 convs, and a 1x1 -> 3x3 chain that forks into a second
    1x3/3x1 pair.  All branch outputs are concatenated channel-wise.
    input:1280or2048x8x8
    output:2048x8x8
    :param pool: pool_type for the pooling branch (P.Pooling.AVE or P.Pooling.MAX)
    :param bottom: bottom layer
    :return: layers (every created layer, in creation order, ending with the concat)
    """
    # Branch 1: pooling followed by a 1x1 projection.  The local is named
    # pool_layer so it does not shadow the 'pool' (pool-type) parameter,
    # which the original code overwrote on its first line.
    pool_layer = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=pool)  # 1280or2048x8x8
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool_layer, num_output=192, kernel_size=1)  # 192x8x8
    # Branch 2: plain 1x1 convolution.
    conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, conv_1x1_2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=320, kernel_size=1)  # 320x8x8
    # Branch 3: 1x1 reduction, then the branch forks — both conv_1x3 and
    # conv_3x1 read conv_1x3_reduce.
    conv_1x3_reduce, conv_1x3_reduce_bn, conv_1x3_reduce_scale, conv_1x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=1)  # 384x8x8
    conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu = \
        factorization_conv_mxn(conv_1x3_reduce, num_output=384, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 384x8x8
    conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu = \
        factorization_conv_mxn(conv_1x3_reduce, num_output=384, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 384x8x8
    # Branch 4: 1x1 reduction, 3x3 conv, then forked 1x3 and 3x1 outputs.
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=448, kernel_size=1)  # 448x8x8
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=384, kernel_size=3, pad=1)  # 384x8x8
    conv_1x3_2, conv_1x3_2_bn, conv_1x3_2_scale, conv_1x3_2_relu = \
        factorization_conv_mxn(conv_3x3, num_output=384, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 384x8x8
    conv_3x1_2, conv_3x1_2_bn, conv_3x1_2_scale, conv_3x1_2_relu = \
        factorization_conv_mxn(conv_3x3, num_output=384, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 384x8x8
    concat = L.Concat(conv_1x1_2, conv_1x3, conv_3x1, conv_1x3_2, conv_3x1_2, conv_1x1)  # 2048(192+320+384+384+384+384)x8x8
    # NOTE: callers unpack this tuple positionally — keep the order stable.
    return pool_layer, conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, \
        conv_1x1_2_relu, conv_1x3_reduce, conv_1x3_reduce_bn, conv_1x3_reduce_scale, conv_1x3_reduce_relu, conv_1x3, \
        conv_1x3_bn, conv_1x3_scale, conv_1x3_relu, conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu, \
        conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, \
        conv_3x3_scale, conv_3x3_relu, conv_1x3_2, conv_1x3_2_bn, conv_1x3_2_scale, conv_1x3_2_relu, conv_3x1_2, \
        conv_3x1_2_bn, conv_3x1_2_scale, conv_3x1_2_relu, concat
class InceptionV3(object):
    """Generates Inception-v3 train/test prototxt from LMDB sources.

    Relies on the module-level builder helpers defined elsewhere in this
    file (``stem_v3_299x299``, ``inception_v3_a/b/c``, ``reduction_v3_a/b``,
    ``factorization_conv_bn_scale_relu``); each helper returns every layer
    top it created so they can all be registered by name on the NetSpec.
    """

    def __init__(self, lmdb_train, lmdb_test, num_output):
        # Paths to the train/test LMDB databases and the class count used
        # for the final (and auxiliary) classifier layers.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output

    def inception_v3_proto(self, batch_size, phase='TRAIN'):
        """Build the Inception-v3 network (299x299 crops) and return its proto.

        phase == 'TRAIN' reads the training LMDB with mirroring enabled;
        any other value reads the test LMDB and appends top-1/top-5
        Accuracy layers. Trailing comments give the expected CxHxW of
        each stage's output.
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=299, mean_value=[104, 117, 123], mirror=mirror))
        # stem
        n.conv1_3x3_s2, n.conv1_3x3_s2_bn, n.conv1_3x3_s2_scale, n.conv1_3x3_relu, n.conv2_3x3_s1, n.conv2_3x3_s1_bn, \
            n.conv2_3x3_s1_scale, n.conv2_3x3_relu, n.conv3_3x3_s1, n.conv3_3x3_s1_bn, n.conv3_3x3_s1_scale, n.conv3_3x3_relu, \
            n.pool1_3x3_s2, n.conv4_3x3_reduce, n.conv4_3x3_reduce_bn, n.conv4_relu_3x3_reduce, n.conv4_3x3_reduce_scale, \
            n.conv4_3x3, n.conv4_3x3_bn, n.conv4_3x3_scale, n.conv4_relu_3x3, n.pool2_3x3_s2 = \
            stem_v3_299x299(n.data)  # 192x35x35
        # 3 x inception_v3_a
        n.inception_a1_1x1, n.inception_a1_1x1_bn, n.inception_a1_1x1_scale, n.inception_a1_1x1_relu, n.inception_a1_5x5_reduce, \
            n.inception_a1_5x5_reduce_bn, n.inception_a1_5x5_reduce_scale, n.inception_a1_5x5_reduce_relu, n.inception_a1_5x5, \
            n.inception_a1_5x5_bn, n.inception_a1_5x5_scale, n.inception_a1_5x5_relu, n.inception_a1_3x3_reduce, \
            n.inception_a1_3x3_reduce_bn, n.inception_a1_3x3_reduce_scale, n.inception_a1_3x3_reduce_relu, n.inception_a1_3x3_1, \
            n.inception_a1_3x3_bn, n.inception_a1_3x3_scale, n.inception_a1_3x3_relu, n.inception_a1_3x3_2, \
            n.inception_a1_3x3_2_bn, n.inception_a1_3x3_2_scale, n.inception_a1_3x3_2_relu, n.inception_a1_pool, \
            n.inception_a1_pool_proj, n.inception_a1_pool_proj_bn, n.inception_a1_pool_proj_scale, n.inception_a1_pool_proj_relu, \
            n.inception_a1_output = \
            inception_v3_a(n.pool2_3x3_s2)  # 256x35x35
        n.inception_a2_1x1, n.inception_a2_1x1_bn, n.inception_a2_1x1_scale, n.inception_a2_1x1_relu, n.inception_a2_5x5_reduce, \
            n.inception_a2_5x5_reduce_bn, n.inception_a2_5x5_reduce_scale, n.inception_a2_5x5_reduce_relu, n.inception_a2_5x5, \
            n.inception_a2_5x5_bn, n.inception_a2_5x5_scale, n.inception_a2_5x5_relu, n.inception_a2_3x3_reduce, \
            n.inception_a2_3x3_reduce_bn, n.inception_a2_3x3_reduce_scale, n.inception_a2_3x3_reduce_relu, n.inception_a2_3x3_1, \
            n.inception_a2_3x3_bn, n.inception_a2_3x3_scale, n.inception_a2_3x3_relu, n.inception_a2_3x3_2, \
            n.inception_a2_3x3_2_bn, n.inception_a2_3x3_2_scale, n.inception_a2_3x3_2_relu, n.inception_a2_pool, \
            n.inception_a2_pool_proj, n.inception_a2_pool_proj_bn, n.inception_a2_pool_proj_scale, n.inception_a2_pool_proj_relu, \
            n.inception_a2_output = \
            inception_v3_a(n.inception_a1_output, pool_proj_num_output=64)  # 288x35x35
        n.inception_a3_1x1, n.inception_a3_1x1_bn, n.inception_a3_1x1_scale, n.inception_a3_1x1_relu, n.inception_a3_5x5_reduce, \
            n.inception_a3_5x5_reduce_bn, n.inception_a3_5x5_reduce_scale, n.inception_a3_5x5_reduce_relu, n.inception_a3_5x5, \
            n.inception_a3_5x5_bn, n.inception_a3_5x5_scale, n.inception_a3_5x5_relu, n.inception_a3_3x3_reduce, \
            n.inception_a3_3x3_reduce_bn, n.inception_a3_3x3_reduce_scale, n.inception_a3_3x3_reduce_relu, n.inception_a3_3x3_1, \
            n.inception_a3_3x3_bn, n.inception_a3_3x3_scale, n.inception_a3_3x3_relu, n.inception_a3_3x3_2, \
            n.inception_a3_3x3_2_bn, n.inception_a3_3x3_2_scale, n.inception_a3_3x3_2_relu, n.inception_a3_pool, \
            n.inception_a3_pool_proj, n.inception_a3_pool_proj_bn, n.inception_a3_pool_proj_scale, n.inception_a3_pool_proj_relu, \
            n.inception_a3_output = \
            inception_v3_a(n.inception_a2_output, pool_proj_num_output=64)  # 288x35x35
        # reduction_v3_a: halves spatial resolution 35x35 -> 17x17
        n.reduction_a_pool, n.reduction_a_3x3, n.reduction_a_3x3_bn, n.reduction_a_3x3_scale, n.reduction_a_3x3_relu, \
            n.reduction_a_3x3_2_reduce, n.reduction_a_3x3_2_reduce_bn, n.reduction_a_3x3_2_reduce_scale, n.reduction_a_3x3_2_reduce_relu, \
            n.reduction_a_3x3_2, n.reduction_a_3x3_2_bn, n.reduction_a_3x3_2_scale, n.reduction_a_3x3_2_relu, n.reduction_a_3x3_3, \
            n.reduction_a_3x3_3_bn, n.reduction_a_3x3_3_scale, n.reduction_a_3x3_3_relu, n.reduction_a_concat = \
            reduction_v3_a(n.inception_a3_output)  # 768x17x17
        # 4 x inception_v3_b (7x7 factorized into 1x7/7x1); `outs` widens the
        # factorized paths: 128 -> 160 -> 160 -> 192 as in the paper.
        n.inception_b1_pool_ave, n.inception_b1_1x1, n.inception_b1_1x1_bn, n.inception_b1_1x1_scale, n.inception_b1_1x1_relu, \
            n.inception_b1_1x1_2, n.inception_b1_1x1_2_bn, n.inception_b1_1x1_2_scale, n.inception_b1_1x1_2_relu, \
            n.inception_b1_1x7_reduce, n.inception_b1_1x7_reduce_bn, n.inception_b1_1x7_reduce_scale, n.inception_b1_1x7_reduce_relu, \
            n.inception_b1_1x7, n.inception_b1_1x7_bn, n.inception_b1_1x7_scale, n.inception_b1_1x7_relu, n.inception_b1_7x1, \
            n.inception_b1_7x1_bn, n.inception_b1_7x1_scale, n.inception_b1_7x1_relu, n.inception_b1_7x1_reduce, n.inception_b1_7x1_reduce_bn, \
            n.inception_b1_7x1_reduce_scale, n.inception_b1_7x1_reduce_relu, n.inception_b1_7x1_2, n.inception_b1_7x1_2_bn, \
            n.inception_b1_7x1_2_scale, n.inception_b1_7x1_2_relu, n.inception_b1_1x7_2, n.inception_b1_1x7_2_bn, n.inception_b1_1x7_2_scale, \
            n.inception_b1_1x7_2_relu, n.inception_b1_7x1_3, n.inception_b1_7x1_3_bn, n.inception_b1_7x1_3_scale, n.inception_b1_7x1_3_relu, \
            n.inception_b1_1x7_3, n.inception_b1_1x7_3_bn, n.inception_b1_1x7_3_scale, n.inception_b1_1x7_3_relu, n.inception_b1_concat = \
            inception_v3_b(n.reduction_a_concat, outs=128)  # 768x17x17
        n.inception_b2_pool_ave, n.inception_b2_1x1, n.inception_b2_1x1_bn, n.inception_b2_1x1_scale, n.inception_b2_1x1_relu, \
            n.inception_b2_1x1_2, n.inception_b2_1x1_2_bn, n.inception_b2_1x1_2_scale, n.inception_b2_1x1_2_relu, \
            n.inception_b2_1x7_reduce, n.inception_b2_1x7_reduce_bn, n.inception_b2_1x7_reduce_scale, n.inception_b2_1x7_reduce_relu, \
            n.inception_b2_1x7, n.inception_b2_1x7_bn, n.inception_b2_1x7_scale, n.inception_b2_1x7_relu, n.inception_b2_7x1, \
            n.inception_b2_7x1_bn, n.inception_b2_7x1_scale, n.inception_b2_7x1_relu, n.inception_b2_7x1_reduce, n.inception_b2_7x1_reduce_bn, \
            n.inception_b2_7x1_reduce_scale, n.inception_b2_7x1_reduce_relu, n.inception_b2_7x1_2, n.inception_b2_7x1_2_bn, \
            n.inception_b2_7x1_2_scale, n.inception_b2_7x1_2_relu, n.inception_b2_1x7_2, n.inception_b2_1x7_2_bn, n.inception_b2_1x7_2_scale, \
            n.inception_b2_1x7_2_relu, n.inception_b2_7x1_3, n.inception_b2_7x1_3_bn, n.inception_b2_7x1_3_scale, n.inception_b2_7x1_3_relu, \
            n.inception_b2_1x7_3, n.inception_b2_1x7_3_bn, n.inception_b2_1x7_3_scale, n.inception_b2_1x7_3_relu, n.inception_b2_concat = \
            inception_v3_b(n.inception_b1_concat, outs=160)  # 768x17x17
        n.inception_b3_pool_ave, n.inception_b3_1x1, n.inception_b3_1x1_bn, n.inception_b3_1x1_scale, n.inception_b3_1x1_relu, \
            n.inception_b3_1x1_2, n.inception_b3_1x1_2_bn, n.inception_b3_1x1_2_scale, n.inception_b3_1x1_2_relu, \
            n.inception_b3_1x7_reduce, n.inception_b3_1x7_reduce_bn, n.inception_b3_1x7_reduce_scale, n.inception_b3_1x7_reduce_relu, \
            n.inception_b3_1x7, n.inception_b3_1x7_bn, n.inception_b3_1x7_scale, n.inception_b3_1x7_relu, n.inception_b3_7x1, \
            n.inception_b3_7x1_bn, n.inception_b3_7x1_scale, n.inception_b3_7x1_relu, n.inception_b3_7x1_reduce, n.inception_b3_7x1_reduce_bn, \
            n.inception_b3_7x1_reduce_scale, n.inception_b3_7x1_reduce_relu, n.inception_b3_7x1_2, n.inception_b3_7x1_2_bn, \
            n.inception_b3_7x1_2_scale, n.inception_b3_7x1_2_relu, n.inception_b3_1x7_2, n.inception_b3_1x7_2_bn, n.inception_b3_1x7_2_scale, \
            n.inception_b3_1x7_2_relu, n.inception_b3_7x1_3, n.inception_b3_7x1_3_bn, n.inception_b3_7x1_3_scale, n.inception_b3_7x1_3_relu, \
            n.inception_b3_1x7_3, n.inception_b3_1x7_3_bn, n.inception_b3_1x7_3_scale, n.inception_b3_1x7_3_relu, n.inception_b3_concat = \
            inception_v3_b(n.inception_b2_concat, outs=160)  # 768x17x17
        n.inception_b4_pool_ave, n.inception_b4_1x1, n.inception_b4_1x1_bn, n.inception_b4_1x1_scale, n.inception_b4_1x1_relu, \
            n.inception_b4_1x1_2, n.inception_b4_1x1_2_bn, n.inception_b4_1x1_2_scale, n.inception_b4_1x1_2_relu, \
            n.inception_b4_1x7_reduce, n.inception_b4_1x7_reduce_bn, n.inception_b4_1x7_reduce_scale, n.inception_b4_1x7_reduce_relu, \
            n.inception_b4_1x7, n.inception_b4_1x7_bn, n.inception_b4_1x7_scale, n.inception_b4_1x7_relu, n.inception_b4_7x1, \
            n.inception_b4_7x1_bn, n.inception_b4_7x1_scale, n.inception_b4_7x1_relu, n.inception_b4_7x1_reduce, n.inception_b4_7x1_reduce_bn, \
            n.inception_b4_7x1_reduce_scale, n.inception_b4_7x1_reduce_relu, n.inception_b4_7x1_2, n.inception_b4_7x1_2_bn, \
            n.inception_b4_7x1_2_scale, n.inception_b4_7x1_2_relu, n.inception_b4_1x7_2, n.inception_b4_1x7_2_bn, n.inception_b4_1x7_2_scale, \
            n.inception_b4_1x7_2_relu, n.inception_b4_7x1_3, n.inception_b4_7x1_3_bn, n.inception_b4_7x1_3_scale, n.inception_b4_7x1_3_relu, \
            n.inception_b4_1x7_3, n.inception_b4_1x7_3_bn, n.inception_b4_1x7_3_scale, n.inception_b4_1x7_3_relu, n.inception_b4_concat = \
            inception_v3_b(n.inception_b3_concat, outs=192)  # 768x17x17
        # loss 1: auxiliary classifier branch off the 17x17 grid, weighted 0.4
        n.auxiliary_loss_ave_pool = L.Pooling(n.inception_b4_concat, kernel_size=5, stride=3,
                                              pool=P.Pooling.AVE)  # 768x5x5
        n.auxiliary_loss_conv, n.auxiliary_loss_conv_bn, n.auxiliary_loss_conv_scale, n.auxiliary_loss_relu_conv = \
            factorization_conv_bn_scale_relu(n.auxiliary_loss_ave_pool, num_output=128, kernel_size=1)  # 128x1x1
        n.auxiliary_loss_fc = L.InnerProduct(n.auxiliary_loss_conv, num_output=768,
                                             param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                             weight_filler=dict(type='xavier', std=0.01),
                                             bias_filler=dict(type='constant', value=0))
        n.auxiliary_loss_fc_relu = L.ReLU(n.auxiliary_loss_fc, in_place=True)
        n.auxiliary_loss_classifier = L.InnerProduct(n.auxiliary_loss_fc, num_output=self.classifier_num,
                                                     param=[dict(lr_mult=1, decay_mult=1),
                                                            dict(lr_mult=2, decay_mult=0)],
                                                     weight_filler=dict(type='xavier'),
                                                     bias_filler=dict(type='constant', value=0))
        n.auxiliary_loss = L.SoftmaxWithLoss(n.auxiliary_loss_classifier, n.label, loss_weight=0.4)
        # reduction_v3_b: 17x17 -> 8x8
        n.reduction_b_pool, n.reduction_b_3x3_reduce, n.reduction_b_3x3_reduce_bn, n.reduction_b_3x3_reduce_scale, \
            n.reduction_b_3x3_reduce_relu, n.reduction_b_3x3, n.reduction_b_3x3_bn, n.reduction_b_3x3_scale, n.reduction_b_3x3_relu, \
            n.reduction_b_1x7_reduce, n.reduction_b_1x7_reduce_bn, n.reduction_b_1x7_reduce_scale, n.reduction_b_1x7_reduce_relu, \
            n.reduction_b_1x7, n.reduction_b_1x7_bn, n.reduction_b_1x7_scale, n.reduction_b_1x7_relu, n.reduction_b_7x1, \
            n.reduction_b_7x1_bn, n.reduction_b_7x1_scale, n.reduction_b_7x1_relu, n.reduction_b_3x3_2, n.reduction_b_3x3_2_bn, \
            n.reduction_b_3x3_2_scale, n.reduction_b_3x3_2_relu, n.reduction_b_concat = \
            reduction_v3_b(n.inception_b4_concat)  # 1280x8x8
        # 2 x inception_v3_c
        n.inception_c1_pool, n.inception_c1_1x1, n.inception_c1_1x1_bn, n.inception_c1_1x1_scale, n.inception_c1_1x1_relu, \
            n.inception_c1_1x1_2, n.inception_c1_1x1_2_bn, n.inception_c1_1x1_2_scale, n.inception_c1_1x1_2_relu, \
            n.inception_c1_1x3_reduce, n.inception_c1_1x3_reduce_bn, n.inception_c1_1x3_reduce_scale, n.inception_c1_1x3_reduce_relu, \
            n.inception_c1_1x3, n.inception_c1_1x3_bn, n.inception_c1_1x3_scale, n.inception_c1_1x3_relu, n.inception_c1_3x1, \
            n.inception_c1_3x1_bn, n.inception_c1_3x1_scale, n.inception_c1_3x1_relu, n.inception_c1_3x3_reduce, \
            n.inception_c1_3x3_reduce_bn, n.inception_c1_3x3_reduce_scale, n.inception_c1_3x3_reduce_relu, n.inception_c1_3x3, \
            n.inception_c1_3x3_bn, n.inception_c1_3x3_scale, n.inception_c1_3x3_relu, n.inception_c1_1x3_2, n.inception_c1_1x3_2_bn, \
            n.inception_c1_1x3_2_scale, n.inception_c1_1x3_2_relu, n.inception_c1_3x1_2, n.inception_c1_3x1_2_bn, n.inception_c1_3x1_2_scale, \
            n.inception_c1_3x1_2_relu, n.inception_c1_concat = \
            inception_v3_c(n.reduction_b_concat)  # 2048x8x8
        n.inception_c2_pool, n.inception_c2_1x1, n.inception_c2_1x1_bn, n.inception_c2_1x1_scale, n.inception_c2_1x1_relu, \
            n.inception_c2_1x1_2, n.inception_c2_1x1_2_bn, n.inception_c2_1x1_2_scale, n.inception_c2_1x1_2_relu, \
            n.inception_c2_1x3_reduce, n.inception_c2_1x3_reduce_bn, n.inception_c2_1x3_reduce_scale, n.inception_c2_1x3_reduce_relu, \
            n.inception_c2_1x3, n.inception_c2_1x3_bn, n.inception_c2_1x3_scale, n.inception_c2_1x3_relu, n.inception_c2_3x1, \
            n.inception_c2_3x1_bn, n.inception_c2_3x1_scale, n.inception_c2_3x1_relu, n.inception_c2_3x3_reduce, \
            n.inception_c2_3x3_reduce_bn, n.inception_c2_3x3_reduce_scale, n.inception_c2_3x3_reduce_relu, n.inception_c2_3x3, \
            n.inception_c2_3x3_bn, n.inception_c2_3x3_scale, n.inception_c2_3x3_relu, n.inception_c2_1x3_2, n.inception_c2_1x3_2_bn, \
            n.inception_c2_1x3_2_scale, n.inception_c2_1x3_2_relu, n.inception_c2_3x1_2, n.inception_c2_3x1_2_bn, n.inception_c2_3x1_2_scale, \
            n.inception_c2_3x1_2_relu, n.inception_c2_concat = \
            inception_v3_c(n.inception_c1_concat, pool=P.Pooling.MAX)  # 2048x8x8
        # loss 2: main classifier on globally averaged 8x8 features
        n.pool_8x8_s1 = L.Pooling(n.inception_c2_concat, kernel_size=8, pool=P.Pooling.AVE)
        n.pool_8x8_s1_drop = L.Dropout(n.pool_8x8_s1, dropout_param=dict(dropout_ratio=0.2))
        n.classifier = L.InnerProduct(n.pool_8x8_s1_drop, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # Accuracy layers only make sense (and are only added) at TEST time.
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 29,733 | 70.995157 | 140 | py |
caffe-model | caffe-model-master/vggnet.py | from caffe import layers as L
from caffe import params as P
import caffe
def conv_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=1):
    """Attach a Convolution layer with an in-place ReLU on top of ``bottom``.

    Returns the (conv, relu) tops so the caller can register both on a NetSpec.
    """
    learn_mults = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=learn_mults,
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    relu = L.ReLU(conv, in_place=True)
    return conv, relu
def fc_relu_drop(bottom, fc_num_output=4096, dropout_ratio=0.5):
    """Fully-connected layer followed by in-place ReLU and Dropout.

    Returns the (fc, relu, drop) tops for NetSpec registration.
    """
    fc = L.InnerProduct(
        bottom,
        num_output=fc_num_output,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    relu = L.ReLU(fc, in_place=True)
    drop = L.Dropout(fc, in_place=True, dropout_param=dict(dropout_ratio=dropout_ratio))
    return fc, relu, drop
def conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=1):
    """Convolution -> BatchNorm -> Scale -> ReLU; the last three run in place.

    Returns (conv, bn, scale, relu) tops for NetSpec registration.
    """
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    # All three post-ops operate in place on the conv top.
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def accuracy_top1_top5(bottom, label):
    """Return top-1 and top-5 Accuracy layers, restricted to the TEST phase."""
    top1 = L.Accuracy(bottom, label, include=dict(phase=1))
    top5 = L.Accuracy(bottom, label, include=dict(phase=1), accuracy_param=dict(top_k=5))
    return top1, top5
class VggNet(object):
    """Generates VGG-16 / VGG-19 prototxt (plain and BatchNorm variants).

    Uses the module-level helpers ``conv_relu``, ``conv_bn_scale_relu``,
    ``fc_relu_drop`` and ``accuracy_top1_top5`` defined above.
    """

    def __init__(self, lmdb_train, lmdb_test, num_output):
        # Train/test LMDB paths and the class count for the final fc8 layer.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output

    def vgg_16_proto(self, batch_size, phase='TRAIN'):
        """VGG-16 (configuration D): 13 conv + 3 fc layers, 224x224 crops.

        TRAIN mirrors and reads the training LMDB; other phases read the
        test LMDB and append top-1/top-5 accuracy. Trailing comments give
        the CxHxW after each pooling stage.
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1_1, n.relu1_1 = conv_relu(n.data, num_output=64)
        n.conv1_2, n.relu1_2 = conv_relu(n.conv1_1, num_output=64)
        n.pool1 = L.Pooling(n.conv1_2, pool=P.Pooling.MAX, kernel_size=2, stride=2)  # 64x112x112
        n.conv2_1, n.relu2_1 = conv_relu(n.pool1, num_output=128)
        n.conv2_2, n.relu2_2 = conv_relu(n.conv2_1, num_output=128)
        n.pool2 = L.Pooling(n.conv2_2, pool=P.Pooling.MAX, kernel_size=2, stride=2)  # 128x56x56
        n.conv3_1, n.relu3_1 = conv_relu(n.pool2, num_output=256)
        n.conv3_2, n.relu3_2 = conv_relu(n.conv3_1, num_output=256)
        n.conv3_3, n.relu3_3 = conv_relu(n.conv3_2, num_output=256)
        n.pool3 = L.Pooling(n.conv3_3, pool=P.Pooling.MAX, kernel_size=2, stride=2)  # 256x28x28
        n.conv4_1, n.relu4_1 = conv_relu(n.pool3, num_output=512)
        n.conv4_2, n.relu4_2 = conv_relu(n.conv4_1, num_output=512)
        n.conv4_3, n.relu4_3 = conv_relu(n.conv4_2, num_output=512)
        n.pool4 = L.Pooling(n.conv4_3, pool=P.Pooling.MAX, kernel_size=2, stride=2)  # 512x14x14
        n.conv5_1, n.relu5_1 = conv_relu(n.pool4, num_output=512)
        n.conv5_2, n.relu5_2 = conv_relu(n.conv5_1, num_output=512)
        n.conv5_3, n.relu5_3 = conv_relu(n.conv5_2, num_output=512)
        n.pool5 = L.Pooling(n.conv5_3, pool=P.Pooling.MAX, kernel_size=2, stride=2)  # 512x7x7
        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5, fc_num_output=4096, dropout_ratio=0.5)
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6, fc_num_output=4096, dropout_ratio=0.5)
        n.fc8 = L.InnerProduct(n.fc7, num_output=self.classifier_num,
                               param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0)
                               )
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        if phase == 'TRAIN':
            pass
        else:
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.fc8, n.label)
        return n.to_proto()

    def vgg_16_bn_proto(self, batch_size, phase='TRAIN'):
        """VGG-16 with BatchNorm+Scale after every convolution."""
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1_1, n.bn1_1, n.scale1_1, n.relu1_1 = conv_bn_scale_relu(n.data, num_output=64)
        n.conv1_2, n.bn1_2, n.scale1_2, n.relu1_2 = conv_bn_scale_relu(n.conv1_1, num_output=64)
        n.pool1 = L.Pooling(n.conv1_2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv2_1, n.bn2_1, n.scale2_1, n.relu2_1 = conv_bn_scale_relu(n.pool1, num_output=128)
        n.conv2_2, n.bn2_2, n.scale2_2, n.relu2_2 = conv_bn_scale_relu(n.conv2_1, num_output=128)
        n.pool2 = L.Pooling(n.conv2_2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv3_1, n.bn3_1, n.scale3_1, n.relu3_1 = conv_bn_scale_relu(n.pool2, num_output=256)
        n.conv3_2, n.bn3_2, n.scale3_2, n.relu3_2 = conv_bn_scale_relu(n.conv3_1, num_output=256)
        n.conv3_3, n.bn3_3, n.scale3_3, n.relu3_3 = conv_bn_scale_relu(n.conv3_2, num_output=256)
        n.pool3 = L.Pooling(n.conv3_3, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv4_1, n.bn4_1, n.scale4_1, n.relu4_1 = conv_bn_scale_relu(n.pool3, num_output=512)
        n.conv4_2, n.bn4_2, n.scale4_2, n.relu4_2 = conv_bn_scale_relu(n.conv4_1, num_output=512)
        n.conv4_3, n.bn4_3, n.scale4_3, n.relu4_3 = conv_bn_scale_relu(n.conv4_2, num_output=512)
        n.pool4 = L.Pooling(n.conv4_3, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv5_1, n.bn5_1, n.scale5_1, n.relu5_1 = conv_bn_scale_relu(n.pool4, num_output=512)
        n.conv5_2, n.bn5_2, n.scale5_2, n.relu5_2 = conv_bn_scale_relu(n.conv5_1, num_output=512)
        n.conv5_3, n.bn5_3, n.scale5_3, n.relu5_3 = conv_bn_scale_relu(n.conv5_2, num_output=512)
        n.pool5 = L.Pooling(n.conv5_3, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5, fc_num_output=4096, dropout_ratio=0.5)
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6, fc_num_output=4096, dropout_ratio=0.5)
        n.fc8 = L.InnerProduct(n.fc7, num_output=self.classifier_num,
                               param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0)
                               )
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        if phase == 'TRAIN':
            pass
        else:
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.fc8, n.label)
        return n.to_proto()

    def vgg_19_proto(self, batch_size, phase='TRAIN'):
        """VGG-19 (configuration E): four convs in stages 3-5 instead of three."""
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1_1, n.relu1_1 = conv_relu(n.data, num_output=64)
        n.conv1_2, n.relu1_2 = conv_relu(n.conv1_1, num_output=64)
        n.pool1 = L.Pooling(n.conv1_2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv2_1, n.relu2_1 = conv_relu(n.pool1, num_output=128)
        n.conv2_2, n.relu2_2 = conv_relu(n.conv2_1, num_output=128)
        n.pool2 = L.Pooling(n.conv2_2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv3_1, n.relu3_1 = conv_relu(n.pool2, num_output=256)
        n.conv3_2, n.relu3_2 = conv_relu(n.conv3_1, num_output=256)
        n.conv3_3, n.relu3_3 = conv_relu(n.conv3_2, num_output=256)
        n.conv3_4, n.relu3_4 = conv_relu(n.conv3_3, num_output=256)
        n.pool3 = L.Pooling(n.conv3_4, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv4_1, n.relu4_1 = conv_relu(n.pool3, num_output=512)
        n.conv4_2, n.relu4_2 = conv_relu(n.conv4_1, num_output=512)
        n.conv4_3, n.relu4_3 = conv_relu(n.conv4_2, num_output=512)
        n.conv4_4, n.relu4_4 = conv_relu(n.conv4_3, num_output=512)
        n.pool4 = L.Pooling(n.conv4_4, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv5_1, n.relu5_1 = conv_relu(n.pool4, num_output=512)
        n.conv5_2, n.relu5_2 = conv_relu(n.conv5_1, num_output=512)
        n.conv5_3, n.relu5_3 = conv_relu(n.conv5_2, num_output=512)
        n.conv5_4, n.relu5_4 = conv_relu(n.conv5_3, num_output=512)
        n.pool5 = L.Pooling(n.conv5_4, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5, fc_num_output=4096, dropout_ratio=0.5)
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6, fc_num_output=4096, dropout_ratio=0.5)
        n.fc8 = L.InnerProduct(n.fc7, num_output=self.classifier_num,
                               param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0)
                               )
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        if phase == 'TRAIN':
            pass
        else:
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.fc8, n.label)
        return n.to_proto()

    def vgg_19_bn_proto(self, batch_size, phase='TRAIN'):
        """VGG-19 with BatchNorm+Scale after every convolution."""
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1_1, n.bn1_1, n.scale1_1, n.relu1_1 = conv_bn_scale_relu(n.data, num_output=64)
        n.conv1_2, n.bn1_2, n.scale1_2, n.relu1_2 = conv_bn_scale_relu(n.conv1_1, num_output=64)
        n.pool1 = L.Pooling(n.conv1_2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv2_1, n.bn2_1, n.scale2_1, n.relu2_1 = conv_bn_scale_relu(n.pool1, num_output=128)
        n.conv2_2, n.bn2_2, n.scale2_2, n.relu2_2 = conv_bn_scale_relu(n.conv2_1, num_output=128)
        n.pool2 = L.Pooling(n.conv2_2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv3_1, n.bn3_1, n.scale3_1, n.relu3_1 = conv_bn_scale_relu(n.pool2, num_output=256)
        n.conv3_2, n.bn3_2, n.scale3_2, n.relu3_2 = conv_bn_scale_relu(n.conv3_1, num_output=256)
        n.conv3_3, n.bn3_3, n.scale3_3, n.relu3_3 = conv_bn_scale_relu(n.conv3_2, num_output=256)
        n.conv3_4, n.bn3_4, n.scale3_4, n.relu3_4 = conv_bn_scale_relu(n.conv3_3, num_output=256)
        n.pool3 = L.Pooling(n.conv3_4, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv4_1, n.bn4_1, n.scale4_1, n.relu4_1 = conv_bn_scale_relu(n.pool3, num_output=512)
        n.conv4_2, n.bn4_2, n.scale4_2, n.relu4_2 = conv_bn_scale_relu(n.conv4_1, num_output=512)
        n.conv4_3, n.bn4_3, n.scale4_3, n.relu4_3 = conv_bn_scale_relu(n.conv4_2, num_output=512)
        n.conv4_4, n.bn4_4, n.scale4_4, n.relu4_4 = conv_bn_scale_relu(n.conv4_3, num_output=512)
        n.pool4 = L.Pooling(n.conv4_4, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv5_1, n.bn5_1, n.scale5_1, n.relu5_1 = conv_bn_scale_relu(n.pool4, num_output=512)
        n.conv5_2, n.bn5_2, n.scale5_2, n.relu5_2 = conv_bn_scale_relu(n.conv5_1, num_output=512)
        n.conv5_3, n.bn5_3, n.scale5_3, n.relu5_3 = conv_bn_scale_relu(n.conv5_2, num_output=512)
        n.conv5_4, n.bn5_4, n.scale5_4, n.relu5_4 = conv_bn_scale_relu(n.conv5_3, num_output=512)
        n.pool5 = L.Pooling(n.conv5_4, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5, fc_num_output=4096, dropout_ratio=0.5)
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6, fc_num_output=4096, dropout_ratio=0.5)
        n.fc8 = L.InnerProduct(n.fc7, num_output=self.classifier_num,
                               param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0)
                               )
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        if phase == 'TRAIN':
            pass
        else:
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.fc8, n.label)
        return n.to_proto()
| 13,689 | 53.110672 | 112 | py |
caffe-model | caffe-model-master/lenet.py | from caffe import layers as L
from caffe import params as P
import caffe
class LeNet(object):
    """Emits LeNet prototxt (plain and BatchNorm variants) from LMDB data."""

    def __init__(self, lmdb_train, lmdb_test, num_output):
        # LMDB paths plus the size of the final classification layer.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output

    def lenet_proto(self, batch_size, phase='TRAIN'):
        """Classic LeNet: conv(20)->pool->conv(50)->pool->fc(500)->fc(classes).

        TRAIN reads the training LMDB; any other phase reads the test LMDB
        and additionally emits an Accuracy layer.
        """
        net = caffe.NetSpec()
        source_data = self.train_data if phase == 'TRAIN' else self.test_data
        mirror = False  # mirroring is disabled in both phases
        net.data, net.label = L.Data(
            source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
            transform_param=dict(scale=0.00390625, mirror=mirror))
        net.conv1 = L.Convolution(
            net.data, kernel_size=5, num_output=20, stride=1,
            weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
        net.pool1 = L.Pooling(net.conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        net.conv2 = L.Convolution(
            net.pool1, kernel_size=5, num_output=50, stride=1,
            weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
        net.pool2 = L.Pooling(net.conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        net.ip1 = L.InnerProduct(
            net.pool2, num_output=500,
            weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
        net.relu1 = L.ReLU(net.ip1, in_place=True)
        net.ip2 = L.InnerProduct(
            net.relu1, num_output=self.classifier_num,
            weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
        net.loss = L.SoftmaxWithLoss(net.ip2, net.label)
        if phase != 'TRAIN':
            # Accuracy is only meaningful (and only emitted) at TEST time.
            net.accuracy = L.Accuracy(net.ip2, net.label, include=dict(phase=1))
        return net.to_proto()

    def lenet_bn_proto(self, batch_size, phase='TRAIN'):
        """LeNet with a BatchNorm layer inserted after each convolution."""
        net = caffe.NetSpec()
        source_data = self.train_data if phase == 'TRAIN' else self.test_data
        mirror = False  # mirroring is disabled in both phases
        net.data, net.label = L.Data(
            source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
            transform_param=dict(scale=0.00390625, mirror=mirror))
        net.conv1 = L.Convolution(
            net.data, kernel_size=5, num_output=20, stride=1,
            weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
        net.bn1 = L.BatchNorm(net.conv1, use_global_stats=False)
        net.pool1 = L.Pooling(net.bn1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        net.conv2 = L.Convolution(
            net.pool1, kernel_size=5, num_output=50, stride=1,
            weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
        net.bn2 = L.BatchNorm(net.conv2, use_global_stats=False)
        net.pool2 = L.Pooling(net.bn2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        net.ip1 = L.InnerProduct(
            net.pool2, num_output=500,
            weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
        net.relu1 = L.ReLU(net.ip1, in_place=True)
        net.ip2 = L.InnerProduct(
            net.relu1, num_output=self.classifier_num,
            weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
        net.loss = L.SoftmaxWithLoss(net.ip2, net.label)
        if phase != 'TRAIN':
            net.accuracy = L.Accuracy(net.ip2, net.label, include=dict(phase=1))
        return net.to_proto()
| 3,797 | 45.888889 | 104 | py |
caffe-model | caffe-model-master/fractalnet.py | import caffe
from caffe import layers as L
from caffe import params as P
def conv_bn_scale_relu(bottom, num_output=64, kernel_size=1, stride=1, pad=0):
    """Convolution (xavier init) -> in-place BatchNorm -> Scale -> ReLU.

    Returns (conv, conv_bn, conv_scale, conv_relu) tops for NetSpec registration.
    """
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    # Normalization, scaling and activation all run in place on the conv top.
    norm = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scaler = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    act = L.ReLU(conv, in_place=True)
    return conv, norm, scaler, act
def conv_bn_scale(bottom, num_output=64, kernel_size=1, stride=1, pad=0):
    """Convolution (xavier init) -> in-place BatchNorm -> Scale, no activation.

    Returns (conv, conv_bn, conv_scale) tops for NetSpec registration.
    """
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    norm = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scaler = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    return conv, norm, scaler
def bn_scale_relu(bottom):
    """Pre-activation trio: BatchNorm (new top), then in-place Scale and ReLU."""
    norm = L.BatchNorm(bottom, use_global_stats=False)
    scaler = L.Scale(norm, scale_param=dict(bias_term=True), in_place=True)
    act = L.ReLU(norm, in_place=True)
    return norm, scaler, act
def branch(bottom, num_output=64):
    """Pre-activation branch: BN-Scale-ReLU, 1x1 conv, 3x3 conv, 1x1 expansion.

    The final 1x1 convolution expands the channels to ``num_output * 4`` so
    the branch can be summed element-wise with the parallel 1x1 projection
    paths built by ``fractal_block``. Returns every intermediate top so the
    caller can register them all on the NetSpec.
    """
    bn0, scale0, relu0 = bn_scale_relu(bottom)
    conv1, bn1, scale1, relu1 = conv_bn_scale_relu(bn0, num_output=num_output)
    conv2, bn2, scale2, relu2 = conv_bn_scale_relu(conv1, num_output=num_output, kernel_size=3, pad=1)
    # Fix: the expansion conv previously specified no fillers/param, so Caffe
    # fell back to the default constant(0) weight filler and default lr/decay
    # multipliers — inconsistent with every other convolution in this file.
    # Initialize it the same way as conv_bn_scale/conv_bn_scale_relu above.
    conv3 = L.Convolution(conv2, num_output=num_output * 4, kernel_size=1, stride=1, pad=0,
                          param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                          weight_filler=dict(type='xavier', std=0.01),
                          bias_filler=dict(type='constant', value=0))
    return bn0, scale0, relu0, conv1, bn1, scale1, relu1, conv2, bn2, scale2, relu2, conv3
def fractal_block(bottom, base_output=64):
    """Build one fractal block: four residual-style units joined by Eltwise sums.

    Each unit pairs a 1x1 projection (``conv_bn_scale``) with a bottleneck
    ``branch``; extra projections from earlier tops (``conv12a``, ``conv34a``,
    ``conv1234a``) feed the deeper sums, giving the fractal multi-path shape.
    All convolutions output ``base_output * 4`` channels. Every created top is
    returned (70 of them) so the caller can register each on the NetSpec —
    the order of this tuple must match the ``fractal_string`` exec template.
    ``operation=1`` on Eltwise is element-wise SUM.
    """
    # unit 1: projection + branch, summed
    conv1a, bn1a, scale1a = conv_bn_scale(bottom, num_output=base_output * 4)
    bn1b0, scale1b0, relu1b0, conv1b1, bn1b1, scale1b1, relu1b1, conv1b2, bn1b2, scale1b2, relu1b2, conv1b3 = \
        branch(bottom, num_output=base_output)
    eltwise1 = L.Eltwise(conv1a, conv1b3, eltwise_param=dict(operation=1))
    # unit 2: adds a shortcut projection straight from the block input
    conv2a, bn2a, scale2a = conv_bn_scale(eltwise1, num_output=base_output * 4)
    bn2b0, scale2b0, relu2b0, conv2b1, bn2b1, scale2b1, relu2b1, conv2b2, bn2b2, scale2b2, relu2b2, conv2b3 = \
        branch(eltwise1, num_output=base_output)
    conv12a, bn12a, scale12a = conv_bn_scale(bottom, num_output=base_output * 4)
    eltwise2 = L.Eltwise(conv2a, conv2b3, conv12a, eltwise_param=dict(operation=1))
    # unit 3
    conv3a, bn3a, scale3a = conv_bn_scale(eltwise2, num_output=base_output * 4)
    bn3b0, scale3b0, relu3b0, conv3b1, bn3b1, scale3b1, relu3b1, conv3b2, bn3b2, scale3b2, relu3b2, conv3b3 = \
        branch(eltwise2, num_output=base_output)
    eltwise3 = L.Eltwise(conv3a, conv3b3, eltwise_param=dict(operation=1))
    # unit 4: joins shortcuts from eltwise2 and from the original block input
    conv4a, bn4a, scale4a = conv_bn_scale(eltwise3, num_output=base_output * 4)
    bn4b0, scale4b0, relu4b0, conv4b1, bn4b1, scale4b1, relu4b1, conv4b2, bn4b2, scale4b2, relu4b2, conv4b3 = \
        branch(eltwise3, num_output=base_output)
    conv34a, bn34a, scale34a = conv_bn_scale(eltwise2, num_output=base_output * 4)
    conv1234a, bn1234a, scale1234a = conv_bn_scale(bottom, num_output=base_output * 4)
    eltwise4 = L.Eltwise(conv4a, conv4b3, conv34a, conv1234a, eltwise_param=dict(operation=1))
    return conv1a, bn1a, scale1a, bn1b0, scale1b0, relu1b0, conv1b1, bn1b1, scale1b1, relu1b1, conv1b2, bn1b2, \
        scale1b2, relu1b2, conv1b3, eltwise1, conv2a, bn2a, scale2a, bn2b0, scale2b0, relu2b0, conv2b1, bn2b1, \
        scale2b1, relu2b1, conv2b2, bn2b2, scale2b2, relu2b2, conv2b3, conv12a, bn12a, scale12a, eltwise2, \
        conv3a, bn3a, scale3a, bn3b0, scale3b0, relu3b0, conv3b1, bn3b1, scale3b1, relu3b1, conv3b2, bn3b2, \
        scale3b2, relu3b2, conv3b3, eltwise3, conv4a, bn4a, scale4a, bn4b0, scale4b0, relu4b0, conv4b1, bn4b1, \
        scale4b1, relu4b1, conv4b2, bn4b2, scale4b2, relu4b2, conv4b3, conv34a, bn34a, scale34a, conv1234a, \
        bn1234a, scale1234a, eltwise4
# exec template used by FractalNet.fractalnet_layers_proto: the placeholders
# (stage), (order), (num) and (bottom) are substituted before exec so that all
# 70 tops returned by fractal_block get registered as named attributes on the
# NetSpec ``n``. The target list must stay in the exact order of
# fractal_block's return tuple. (Backslash-newlines continue the single-quoted
# string literal; the extra whitespace inside it is harmless to exec.)
fractal_string = 'n.fractal_(stage)(order)_conv1a, n.fractal_(stage)(order)_bn1a, n.fractal_(stage)(order)_scale1a, \
n.fractal_(stage)(order)_bn1b0, n.fractal_(stage)(order)_scale1b0, n.fractal_(stage)(order)_relu1b0, \
n.fractal_(stage)(order)_conv1b1, n.fractal_(stage)(order)_bn1b1, n.fractal_(stage)(order)_scale1b1, \
n.fractal_(stage)(order)_relu1b1, n.fractal_(stage)(order)_conv1b2, n.fractal_(stage)(order)_bn1b2, \
n.fractal_(stage)(order)_scale1b2, n.fractal_(stage)(order)_relu1b2, n.fractal_(stage)(order)_conv1b3, \
n.fractal_(stage)(order)_eltwise1, n.fractal_(stage)(order)_conv2a, n.fractal_(stage)(order)_bn2a, \
n.fractal_(stage)(order)_scale2a, n.fractal_(stage)(order)_bn2b0, n.fractal_(stage)(order)_scale2b0, \
n.fractal_(stage)(order)_relu2b0, n.fractal_(stage)(order)_conv2b1, n.fractal_(stage)(order)_bn2b1, \
n.fractal_(stage)(order)_scale2b1, n.fractal_(stage)(order)_relu2b1, n.fractal_(stage)(order)_conv2b2, \
n.fractal_(stage)(order)_bn2b2, n.fractal_(stage)(order)_scale2b2, n.fractal_(stage)(order)_relu2b2, \
n.fractal_(stage)(order)_conv2b3, n.fractal_(stage)(order)_conv12a, n.fractal_(stage)(order)_bn12a, \
n.fractal_(stage)(order)_scale12a, n.fractal_(stage)(order)_eltwise2, n.fractal_(stage)(order)_conv3a, \
n.fractal_(stage)(order)_bn3a, n.fractal_(stage)(order)_scale3a, n.fractal_(stage)(order)_bn3b0, \
n.fractal_(stage)(order)_scale3b0, n.fractal_(stage)(order)_relu3b0, n.fractal_(stage)(order)_conv3b1, \
n.fractal_(stage)(order)_bn3b1, n.fractal_(stage)(order)_scale3b1, n.fractal_(stage)(order)_relu3b1, \
n.fractal_(stage)(order)_conv3b2, n.fractal_(stage)(order)_bn3b2, n.fractal_(stage)(order)_scale3b2, \
n.fractal_(stage)(order)_relu3b2, n.fractal_(stage)(order)_conv3b3, n.fractal_(stage)(order)_eltwise3, \
n.fractal_(stage)(order)_conv4a, n.fractal_(stage)(order)_bn4a, n.fractal_(stage)(order)_scale4a, \
n.fractal_(stage)(order)_bn4b0, n.fractal_(stage)(order)_scale4b0, n.fractal_(stage)(order)_relu4b0, \
n.fractal_(stage)(order)_conv4b1, n.fractal_(stage)(order)_bn4b1, n.fractal_(stage)(order)_scale4b1, \
n.fractal_(stage)(order)_relu4b1, n.fractal_(stage)(order)_conv4b2, n.fractal_(stage)(order)_bn4b2, \
n.fractal_(stage)(order)_scale4b2, n.fractal_(stage)(order)_relu4b2, n.fractal_(stage)(order)_conv4b3, \
n.fractal_(stage)(order)_conv34a, n.fractal_(stage)(order)_bn34a, n.fractal_(stage)(order)_scale34a, \
n.fractal_(stage)(order)_conv1234a, n.fractal_(stage)(order)_bn1234a, n.fractal_(stage)(order)_scale1234a, \
n.fractal_(stage)(order)_eltwise4 = fractal_block((bottom), base_output=(num))'
# exec template for the stage-transition max-pooling layer; (order) and
# (bottom) are substituted before exec.
maxpool_string = 'n.pool(order) = L.Pooling((bottom), kernel_size=3, stride=2, pool=P.Pooling.MAX)'
class FractalNet(object):
    """Generates a FractalNet prototxt with caffe's NetSpec.

    The repeated fractal blocks are wired by executing the module-level
    ``fractal_string`` / ``maxpool_string`` templates (Python 2 ``exec``
    statements) with the stage letter, block index, channel count and
    input blob substituted into the template text.
    """
    def __init__(self, lmdb_train, lmdb_test, num_output):
        # LMDB paths for the two phases and the classifier output width.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output
    def fractalnet_layers_proto(self, batch_size, phase='TRAIN', stages=(1, 1, 2, 1)):
        """Return the NetParameter proto for the given phase.

        ``stages`` gives the number of fractal blocks per stage a..d;
        channel width doubles each stage (64, 128, 256, 512).
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            conv_bn_scale_relu(n.data, num_output=64, kernel_size=7, stride=2, pad=3)  # 64x112x112
        n.pool1 = L.Pooling(n.conv1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x56x56
        for num in xrange(len(stages)):  # num = 0, 1, 2, 3
            # Stage entry pooling; the input is conv1 for stage 0, otherwise the
            # last fractal block of the previous stage (selected by [...][num]).
            exec (maxpool_string.replace('(order)', str(num + 1))
                  .replace('(bottom)', ['n.conv1', 'n.fractal_a%s_eltwise4' % str(stages[0]),
                                        'n.fractal_b%s_eltwise4' % str(stages[1]),
                                        'n.fractal_c%s_eltwise4' % str(stages[2])][num]))
            for i in xrange(stages[num]):
                # [...][0 < i]: the first block of a stage reads the stage pool,
                # later blocks read the previous block's eltwise4 output.
                exec (fractal_string.replace('(stage)', 'abcd'[num])
                      .replace('(order)', str(i + 1))
                      .replace('(num)', str(2 ** num * 64))
                      .replace('(bottom)', ['n.pool%s' % str(num + 1),
                                            'n.fractal_%s%s_eltwise4' % ('abcd'[num], str(i))][0 < i]))
        # Global average pooling over the last stage-d block.
        exec 'n.pool5 = L.Pooling((bottom), pool=P.Pooling.AVE, global_pooling=True)'.\
            replace('(bottom)', 'n.fractal_d%s_eltwise4' % str(stages[3]))
        n.classifier = L.InnerProduct(n.pool5, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # Accuracy layers only exist in the TEST prototxt.
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 9,757 | 61.152866 | 117 | py |
caffe-model | caffe-model-master/inception_v1.py | import caffe
from caffe import layers as L
from caffe import params as P
def fc_relu_drop(bottom, fc_param, dropout_ratio=0.5):
    """InnerProduct followed by in-place ReLU and Dropout.

    ``fc_param`` supplies 'num_output', 'weight_type', 'weight_std' and
    'bias_value' for the layer and its fillers.  Returns the
    (fc, relu, drop) tops; relu and drop act in place on the fc blob.
    """
    learn_mult = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    weight_init = dict(type=fc_param['weight_type'], std=fc_param['weight_std'])
    bias_init = dict(type='constant', value=fc_param['bias_value'])
    inner_product = L.InnerProduct(bottom, num_output=fc_param['num_output'],
                                   param=learn_mult,
                                   weight_filler=weight_init,
                                   bias_filler=bias_init)
    activation = L.ReLU(inner_product, in_place=True)
    dropout = L.Dropout(inner_product, in_place=True,
                        dropout_param=dict(dropout_ratio=dropout_ratio))
    return inner_product, activation, dropout
def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm, Scale and ReLU.

    Returns the four tops (conv, bn, scale, relu); the last three all
    transform the convolution blob in place.
    """
    learn_mult = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(bottom, num_output=num_output, kernel_size=kernel_size,
                         stride=stride, pad=pad, param=learn_mult,
                         weight_filler=dict(type='xavier', std=1),
                         bias_filler=dict(type='constant', value=0.2))
    batch_norm = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    activation = L.ReLU(conv, in_place=True)
    return conv, batch_norm, scale, activation
def inception(bottom, conv_output):
    """GoogLeNet inception module (plain, no batch norm).

    Four parallel branches -- 1x1 conv, 1x1->3x3 conv, 1x1->5x5 conv and
    3x3 max-pool -> 1x1 projection -- concatenated along channels.
    ``conv_output`` maps 'conv_1x1', 'conv_3x3_reduce', 'conv_3x3',
    'conv_5x5_reduce', 'conv_5x5' and 'pool_proj' to their num_output
    values.  Returns every intermediate top plus the final concat.

    Bug fix: the weight fillers used ``weight_std=1`` -- not a field of
    caffe's FillerParameter (the valid field is ``std``), so proto
    generation would fail.  Changed to ``std=1``, matching
    factorization_conv_bn_scale_relu above.
    """
    # Shared lr/decay multipliers for all learned layers in the module.
    learn_mult = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv_1x1 = L.Convolution(bottom, kernel_size=1, num_output=conv_output['conv_1x1'],
                             param=learn_mult,
                             weight_filler=dict(type='xavier', std=1),
                             bias_filler=dict(type='constant', value=0.2))
    conv_1x1_relu = L.ReLU(conv_1x1, in_place=True)
    conv_3x3_reduce = L.Convolution(bottom, kernel_size=1, num_output=conv_output['conv_3x3_reduce'],
                                    param=learn_mult,
                                    weight_filler=dict(type='xavier', std=1),
                                    bias_filler=dict(type='constant', value=0.2))
    conv_3x3_reduce_relu = L.ReLU(conv_3x3_reduce, in_place=True)
    conv_3x3 = L.Convolution(conv_3x3_reduce, kernel_size=3, num_output=conv_output['conv_3x3'], pad=1,
                             param=learn_mult,
                             weight_filler=dict(type='xavier', std=1),
                             bias_filler=dict(type='constant', value=0.2))
    conv_3x3_relu = L.ReLU(conv_3x3, in_place=True)
    conv_5x5_reduce = L.Convolution(bottom, kernel_size=1, num_output=conv_output['conv_5x5_reduce'],
                                    param=learn_mult,
                                    weight_filler=dict(type='xavier', std=1),
                                    bias_filler=dict(type='constant', value=0.2))
    conv_5x5_reduce_relu = L.ReLU(conv_5x5_reduce, in_place=True)
    conv_5x5 = L.Convolution(conv_5x5_reduce, kernel_size=5, num_output=conv_output['conv_5x5'], pad=2,
                             param=learn_mult,
                             weight_filler=dict(type='xavier', std=1),
                             bias_filler=dict(type='constant', value=0.2))
    conv_5x5_relu = L.ReLU(conv_5x5, in_place=True)
    # stride=1 / pad=1 keeps the pooled branch at the input resolution.
    pool = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.MAX)
    pool_proj = L.Convolution(pool, kernel_size=1, num_output=conv_output['pool_proj'],
                              param=learn_mult,
                              weight_filler=dict(type='xavier'),
                              bias_filler=dict(type='constant', value=0.2))
    pool_proj_relu = L.ReLU(pool_proj, in_place=True)
    concat = L.Concat(conv_1x1, conv_3x3, conv_5x5, pool_proj)
    return conv_1x1, conv_1x1_relu, conv_3x3_reduce, conv_3x3_reduce_relu, conv_3x3, conv_3x3_relu, conv_5x5_reduce, \
        conv_5x5_reduce_relu, conv_5x5, conv_5x5_relu, pool, pool_proj, pool_proj_relu, concat
def inception_bn(bottom, conv_output):
    """Inception module with BatchNorm: each branch goes through
    factorization_conv_bn_scale_relu instead of a bare conv+ReLU.

    ``conv_output`` uses the same keys as inception().  Returns all 26
    intermediate tops in a fixed order (conv/bn/scale/relu per branch,
    the pooled branch, and the final channel concat) so the caller can
    bind each one to a named NetSpec attribute.
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_1x1'], kernel_size=1)
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_3x3_reduce'], kernel_size=1)
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=conv_output['conv_3x3'], kernel_size=3, pad=1)
    conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_5x5_reduce'], kernel_size=1)
    conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu = \
        factorization_conv_bn_scale_relu(conv_5x5_reduce, num_output=conv_output['conv_5x5'], kernel_size=5, pad=2)
    # Pooled branch keeps spatial size (stride 1, pad 1) before the 1x1 projection.
    pool = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.MAX)
    pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu = \
        factorization_conv_bn_scale_relu(pool, num_output=conv_output['pool_proj'], kernel_size=1)
    concat = L.Concat(conv_1x1, conv_3x3, conv_5x5, pool_proj)
    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_3x3_reduce, conv_3x3_reduce_bn, \
        conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, \
        conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu, conv_5x5, conv_5x5_bn, \
        conv_5x5_scale, conv_5x5_relu, pool, pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu, concat
class InceptionV1(object):
    """GoogLeNet (Inception v1) prototxt generator, plain and BN variants.

    Bug fixes in inception_v1_proto:
    - ``weight_std=1`` in weight fillers is not a FillerParameter field
      (the field is ``std``); proto generation would fail.  Replaced with
      ``std=1`` throughout.
    - ``pool1_3x3_s2`` and ``pool2_3x3_s2`` were built with stride=1,
      pad=1, performing no downsampling despite their ``_s2`` names; the
      BN variant below correctly uses stride=2.  Fixed to stride=2 so the
      final 7x7 average pool sees the expected 7x7 feature map.
    """
    def __init__(self, lmdb_train, lmdb_test, num_output):
        # LMDB paths for the two phases and the classifier output width.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output
    def inception_v1_proto(self, batch_size, phase='TRAIN'):
        """Return the plain (LRN-based, no BN) GoogLeNet NetParameter."""
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1_7x7_s2 = L.Convolution(n.data, num_output=64, kernel_size=7, stride=2, pad=3,
                                       param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                       weight_filler=dict(type='xavier', std=1),
                                       bias_filler=dict(type='constant', value=0.2))
        n.conv1_relu_7x7 = L.ReLU(n.conv1_7x7_s2, in_place=True)
        # stride=2 (was stride=1, pad=1): the layer name and GoogLeNet both
        # call for downsampling here.
        n.pool1_3x3_s2 = L.Pooling(n.conv1_7x7_s2, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.pool1_norm1 = L.LRN(n.pool1_3x3_s2, local_size=5, alpha=1e-4, beta=0.75)
        n.conv2_3x3_reduce = L.Convolution(n.pool1_norm1, kernel_size=1, num_output=64, stride=1,
                                           param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                           weight_filler=dict(type='xavier', std=1),
                                           bias_filler=dict(type='constant', value=0.2))
        n.conv2_relu_3x3_reduce = L.ReLU(n.conv2_3x3_reduce, in_place=True)
        n.conv2_3x3 = L.Convolution(n.conv2_3x3_reduce, num_output=192, kernel_size=3, stride=1, pad=1,
                                    param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                    weight_filler=dict(type='xavier', std=1),
                                    bias_filler=dict(type='constant', value=0.2))
        n.conv2_relu_3x3 = L.ReLU(n.conv2_3x3, in_place=True)
        n.conv2_norm2 = L.LRN(n.conv2_3x3, local_size=5, alpha=1e-4, beta=0.75)
        # stride=2 (was stride=1, pad=1), see note on pool1 above.
        n.pool2_3x3_s2 = L.Pooling(n.conv2_norm2, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.inception_3a_1x1, n.inception_3a_relu_1x1, n.inception_3a_3x3_reduce, n.inception_3a_relu_3x3_reduce, \
            n.inception_3a_3x3, n.inception_3a_relu_3x3, n.inception_3a_5x5_reduce, n.inception_3a_relu_5x5_reduce, \
            n.inception_3a_5x5, n.inception_3a_relu_5x5, n.inception_3a_pool, n.inception_3a_pool_proj, \
            n.inception_3a_relu_pool_proj, n.inception_3a_output = \
            inception(n.pool2_3x3_s2, dict(conv_1x1=64, conv_3x3_reduce=96, conv_3x3=128, conv_5x5_reduce=16,
                                           conv_5x5=32, pool_proj=32))
        n.inception_3b_1x1, n.inception_3b_relu_1x1, n.inception_3b_3x3_reduce, n.inception_3b_relu_3x3_reduce, \
            n.inception_3b_3x3, n.inception_3b_relu_3x3, n.inception_3b_5x5_reduce, n.inception_3b_relu_5x5_reduce, \
            n.inception_3b_5x5, n.inception_3b_relu_5x5, n.inception_3b_pool, n.inception_3b_pool_proj, \
            n.inception_3b_relu_pool_proj, n.inception_3b_output = \
            inception(n.inception_3a_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=192, conv_5x5_reduce=32,
                                                  conv_5x5=96, pool_proj=64))
        n.pool3_3x3_s2 = L.Pooling(n.inception_3b_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.inception_4a_1x1, n.inception_4a_relu_1x1, n.inception_4a_3x3_reduce, n.inception_4a_relu_3x3_reduce, \
            n.inception_4a_3x3, n.inception_4a_relu_3x3, n.inception_4a_5x5_reduce, n.inception_4a_relu_5x5_reduce, \
            n.inception_4a_5x5, n.inception_4a_relu_5x5, n.inception_4a_pool, n.inception_4a_pool_proj, \
            n.inception_4a_relu_pool_proj, n.inception_4a_output = \
            inception(n.pool3_3x3_s2, dict(conv_1x1=192, conv_3x3_reduce=96, conv_3x3=208, conv_5x5_reduce=16,
                                           conv_5x5=48, pool_proj=64))
        # Auxiliary classifier 1 (after 4a), loss_weight 0.3.
        n.loss1_ave_pool = L.Pooling(n.inception_4a_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
        n.loss1_conv = L.Convolution(n.loss1_ave_pool, num_output=128, kernel_size=1, stride=1,
                                     param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                     weight_filler=dict(type='xavier', std=1),
                                     bias_filler=dict(type='constant', value=0.2))
        n.loss1_relu_conv = L.ReLU(n.loss1_conv, in_place=True)
        n.loss1_fc, n.loss1_relu_fc, n.loss1_drop_fc = \
            fc_relu_drop(n.loss1_conv, dict(num_output=1024, weight_type='xavier', weight_std=1, bias_type='constant',
                                            bias_value=0.2), dropout_ratio=0.7)
        n.loss1_classifier = L.InnerProduct(n.loss1_fc, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss1_loss = L.SoftmaxWithLoss(n.loss1_classifier, n.label, loss_weight=0.3)
        if phase != 'TRAIN':
            # Accuracy layers exist only in the TEST prototxt.
            n.loss1_accuracy_top1 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1))
            n.loss1_accuracy_top5 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))
        n.inception_4b_1x1, n.inception_4b_relu_1x1, n.inception_4b_3x3_reduce, n.inception_4b_relu_3x3_reduce, \
            n.inception_4b_3x3, n.inception_4b_relu_3x3, n.inception_4b_5x5_reduce, n.inception_4b_relu_5x5_reduce, \
            n.inception_4b_5x5, n.inception_4b_relu_5x5, n.inception_4b_pool, n.inception_4b_pool_proj, \
            n.inception_4b_relu_pool_proj, n.inception_4b_output = \
            inception(n.inception_4a_output, dict(conv_1x1=160, conv_3x3_reduce=112, conv_3x3=224, conv_5x5_reduce=24,
                                                  conv_5x5=64, pool_proj=64))
        n.inception_4c_1x1, n.inception_4c_relu_1x1, n.inception_4c_3x3_reduce, n.inception_4c_relu_3x3_reduce, \
            n.inception_4c_3x3, n.inception_4c_relu_3x3, n.inception_4c_5x5_reduce, n.inception_4c_relu_5x5_reduce, \
            n.inception_4c_5x5, n.inception_4c_relu_5x5, n.inception_4c_pool, n.inception_4c_pool_proj, \
            n.inception_4c_relu_pool_proj, n.inception_4c_output = \
            inception(n.inception_4b_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=256, conv_5x5_reduce=24,
                                                  conv_5x5=64, pool_proj=64))
        n.inception_4d_1x1, n.inception_4d_relu_1x1, n.inception_4d_3x3_reduce, n.inception_4d_relu_3x3_reduce, \
            n.inception_4d_3x3, n.inception_4d_relu_3x3, n.inception_4d_5x5_reduce, n.inception_4d_relu_5x5_reduce, \
            n.inception_4d_5x5, n.inception_4d_relu_5x5, n.inception_4d_pool, n.inception_4d_pool_proj, \
            n.inception_4d_relu_pool_proj, n.inception_4d_output = \
            inception(n.inception_4c_output, dict(conv_1x1=112, conv_3x3_reduce=144, conv_3x3=288, conv_5x5_reduce=32,
                                                  conv_5x5=64, pool_proj=64))
        # Auxiliary classifier 2 (after 4d), loss_weight 0.3.
        n.loss2_ave_pool = L.Pooling(n.inception_4d_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
        n.loss2_conv = L.Convolution(n.loss2_ave_pool, num_output=128, kernel_size=1, stride=1,
                                     param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                     weight_filler=dict(type='xavier', std=1),
                                     bias_filler=dict(type='constant', value=0.2))
        n.loss2_relu_conv = L.ReLU(n.loss2_conv, in_place=True)
        n.loss2_fc, n.loss2_relu_fc, n.loss2_drop_fc = \
            fc_relu_drop(n.loss2_conv, dict(num_output=1024, weight_type='xavier', weight_std=1, bias_type='constant',
                                            bias_value=0.2), dropout_ratio=0.7)
        n.loss2_classifier = L.InnerProduct(n.loss2_fc, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss2_loss = L.SoftmaxWithLoss(n.loss2_classifier, n.label, loss_weight=0.3)
        if phase != 'TRAIN':
            n.loss2_accuracy_top1 = L.Accuracy(n.loss2_classifier, n.label, include=dict(phase=1))
            n.loss2_accuracy_top5 = L.Accuracy(n.loss2_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))
        n.inception_4e_1x1, n.inception_4e_relu_1x1, n.inception_4e_3x3_reduce, n.inception_4e_relu_3x3_reduce, \
            n.inception_4e_3x3, n.inception_4e_relu_3x3, n.inception_4e_5x5_reduce, n.inception_4e_relu_5x5_reduce, \
            n.inception_4e_5x5, n.inception_4e_relu_5x5, n.inception_4e_pool, n.inception_4e_pool_proj, \
            n.inception_4e_relu_pool_proj, n.inception_4e_output = \
            inception(n.inception_4d_output, dict(conv_1x1=256, conv_3x3_reduce=160, conv_3x3=320, conv_5x5_reduce=32,
                                                  conv_5x5=128, pool_proj=128))
        n.pool4_3x3_s2 = L.Pooling(n.inception_4e_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.inception_5a_1x1, n.inception_5a_relu_1x1, n.inception_5a_3x3_reduce, n.inception_5a_relu_3x3_reduce, \
            n.inception_5a_3x3, n.inception_5a_relu_3x3, n.inception_5a_5x5_reduce, n.inception_5a_relu_5x5_reduce, \
            n.inception_5a_5x5, n.inception_5a_relu_5x5, n.inception_5a_pool, n.inception_5a_pool_proj, \
            n.inception_5a_relu_pool_proj, n.inception_5a_output = \
            inception(n.pool4_3x3_s2, dict(conv_1x1=256, conv_3x3_reduce=160, conv_3x3=320, conv_5x5_reduce=32,
                                           conv_5x5=128, pool_proj=128))
        n.inception_5b_1x1, n.inception_5b_relu_1x1, n.inception_5b_3x3_reduce, n.inception_5b_relu_3x3_reduce, \
            n.inception_5b_3x3, n.inception_5b_relu_3x3, n.inception_5b_5x5_reduce, n.inception_5b_relu_5x5_reduce, \
            n.inception_5b_5x5, n.inception_5b_relu_5x5, n.inception_5b_pool, n.inception_5b_pool_proj, \
            n.inception_5b_relu_pool_proj, n.inception_5b_output = \
            inception(n.inception_5a_output, dict(conv_1x1=384, conv_3x3_reduce=192, conv_3x3=384, conv_5x5_reduce=48,
                                                  conv_5x5=128, pool_proj=128))
        n.pool5_7x7_s1 = L.Pooling(n.inception_5b_output, kernel_size=7, stride=1, pool=P.Pooling.AVE)
        n.pool5_drop_7x7_s1 = L.Dropout(n.pool5_7x7_s1, in_place=True,
                                        dropout_param=dict(dropout_ratio=0.4))
        n.loss3_classifier = L.InnerProduct(n.pool5_7x7_s1, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss3_loss = L.SoftmaxWithLoss(n.loss3_classifier, n.label, loss_weight=1)
        if phase != 'TRAIN':
            n.loss3_accuracy_top1 = L.Accuracy(n.loss3_classifier, n.label, include=dict(phase=1))
            n.loss3_accuracy_top5 = L.Accuracy(n.loss3_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))
        return n.to_proto()
    def inception_bn_proto(self, batch_size, phase='TRAIN'):  # inception_bn
        """Return the BatchNorm variant (LRN replaced by BN+Scale)."""
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1_7x7_s2, n.conv1_7x7_s2_bn, n.conv1_7x7_s2_scale, n.conv1_7x7_relu = \
            factorization_conv_bn_scale_relu(n.data, num_output=64, kernel_size=7, stride=2, pad=3)
        n.pool1_3x3_s2 = L.Pooling(n.conv1_7x7_s2, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.conv2_3x3_reduce, n.conv2_3x3_reduce_bn, n.conv2_3x3_reduce_scale, n.conv2_3x3_reduce_relu = \
            factorization_conv_bn_scale_relu(n.pool1_3x3_s2, num_output=64, kernel_size=1)
        n.conv2_3x3, n.conv2_3x3_bn, n.conv2_3x3_scale, n.conv2_3x3_relu = \
            factorization_conv_bn_scale_relu(n.conv2_3x3_reduce, num_output=192, kernel_size=3, pad=1)
        n.pool2_3x3_s2 = L.Pooling(n.conv2_3x3, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.inception_3a_1x1, n.inception_3a_1x1_bn, n.inception_3a_1x1_scale, n.inception_3a_relu_1x1, \
            n.inception_3a_3x3_reduce, n.inception_3a_3x3_reduce_bn, n.inception_3a_3x3_reduce_scale, \
            n.inception_3a_relu_3x3_reduce, n.inception_3a_3x3, n.inception_3a_3x3_bn, n.inception_3a_3x3_scale, \
            n.inception_3a_relu_3x3, n.inception_3a_5x5_reduce, n.inception_3a_5x5_reduce_bn, \
            n.inception_3a_5x5_reduce_scale, n.inception_3a_relu_5x5_reduce, n.inception_3a_5x5, n.inception_3a_5x5_bn, \
            n.inception_3a_5x5_scale, n.inception_3a_relu_5x5, n.inception_3a_pool, n.inception_3a_pool_proj, \
            n.inception_3a_pool_proj_bn, n.inception_3a_pool_proj_scale, n.inception_3a_relu_pool_proj, \
            n.inception_3a_output = \
            inception_bn(n.pool2_3x3_s2, dict(conv_1x1=64, conv_3x3_reduce=96, conv_3x3=128, conv_5x5_reduce=16,
                                              conv_5x5=32, pool_proj=32))
        n.inception_3b_1x1, n.inception_3b_1x1_bn, n.inception_3b_1x1_scale, n.inception_3b_relu_1x1, \
            n.inception_3b_3x3_reduce, n.inception_3b_3x3_reduce_bn, n.inception_3b_3x3_reduce_scale, \
            n.inception_3b_relu_3x3_reduce, n.inception_3b_3x3, n.inception_3b_3x3_bn, n.inception_3b_3x3_scale, \
            n.inception_3b_relu_3x3, n.inception_3b_5x5_reduce, n.inception_3b_5x5_reduce_bn, \
            n.inception_3b_5x5_reduce_scale, n.inception_3b_relu_5x5_reduce, n.inception_3b_5x5, n.inception_3b_5x5_bn, \
            n.inception_3b_5x5_scale, n.inception_3b_relu_5x5, n.inception_3b_pool, n.inception_3b_pool_proj, \
            n.inception_3b_pool_proj_bn, n.inception_3b_pool_proj_scale, n.inception_3b_relu_pool_proj, \
            n.inception_3b_output = \
            inception_bn(n.inception_3a_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=192,
                                                     conv_5x5_reduce=32, conv_5x5=96, pool_proj=64))
        n.pool3_3x3_s2 = L.Pooling(n.inception_3b_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.inception_4a_1x1, n.inception_4a_1x1_bn, n.inception_4a_1x1_scale, n.inception_4a_relu_1x1, \
            n.inception_4a_3x3_reduce, n.inception_4a_3x3_reduce_bn, n.inception_4a_3x3_reduce_scale, \
            n.inception_4a_relu_3x3_reduce, n.inception_4a_3x3, n.inception_4a_3x3_bn, n.inception_4a_3x3_scale, \
            n.inception_4a_relu_3x3, n.inception_4a_5x5_reduce, n.inception_4a_5x5_reduce_bn, \
            n.inception_4a_5x5_reduce_scale, n.inception_4a_relu_5x5_reduce, n.inception_4a_5x5, n.inception_4a_5x5_bn, \
            n.inception_4a_5x5_scale, n.inception_4a_relu_5x5, n.inception_4a_pool, n.inception_4a_pool_proj, \
            n.inception_4a_pool_proj_bn, n.inception_4a_pool_proj_scale, n.inception_4a_relu_pool_proj, \
            n.inception_4a_output = \
            inception_bn(n.pool3_3x3_s2, dict(conv_1x1=192, conv_3x3_reduce=96, conv_3x3=208, conv_5x5_reduce=16,
                                              conv_5x5=48, pool_proj=64))
        # Auxiliary classifier 1 (after 4a), loss_weight 0.3.
        n.loss1_ave_pool = L.Pooling(n.inception_4a_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
        n.loss1_conv, n.loss1_conv_bn, n.loss1_conv_scale, n.loss1_relu_conv = \
            factorization_conv_bn_scale_relu(n.loss1_ave_pool, num_output=128, kernel_size=1)
        n.loss1_fc, n.loss1_relu_fc, n.loss1_drop_fc = \
            fc_relu_drop(n.loss1_conv, dict(num_output=1024, weight_type='xavier', weight_std=1,
                                            bias_type='constant', bias_value=0.2), dropout_ratio=0.7)
        n.loss1_classifier = L.InnerProduct(n.loss1_fc, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss1_loss = L.SoftmaxWithLoss(n.loss1_classifier, n.label, loss_weight=0.3)
        if phase != 'TRAIN':
            n.loss1_accuracy_top1 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1))
            n.loss1_accuracy_top5 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))
        n.inception_4b_1x1, n.inception_4b_1x1_bn, n.inception_4b_1x1_scale, n.inception_4b_relu_1x1, \
            n.inception_4b_3x3_reduce, n.inception_4b_3x3_reduce_bn, n.inception_4b_3x3_reduce_scale, \
            n.inception_4b_relu_3x3_reduce, n.inception_4b_3x3, n.inception_4b_3x3_bn, n.inception_4b_3x3_scale, \
            n.inception_4b_relu_3x3, n.inception_4b_5x5_reduce, n.inception_4b_5x5_reduce_bn, \
            n.inception_4b_5x5_reduce_scale, n.inception_4b_relu_5x5_reduce, n.inception_4b_5x5, n.inception_4b_5x5_bn, \
            n.inception_4b_5x5_scale, n.inception_4b_relu_5x5, n.inception_4b_pool, n.inception_4b_pool_proj, \
            n.inception_4b_pool_proj_bn, n.inception_4b_pool_proj_scale, n.inception_4b_relu_pool_proj, \
            n.inception_4b_output = \
            inception_bn(n.inception_4a_output, dict(conv_1x1=160, conv_3x3_reduce=112, conv_3x3=224,
                                                     conv_5x5_reduce=24, conv_5x5=64, pool_proj=64))
        n.inception_4c_1x1, n.inception_4c_1x1_bn, n.inception_4c_1x1_scale, n.inception_4c_relu_1x1, \
            n.inception_4c_3x3_reduce, n.inception_4c_3x3_reduce_bn, n.inception_4c_3x3_reduce_scale, \
            n.inception_4c_relu_3x3_reduce, n.inception_4c_3x3, n.inception_4c_3x3_bn, n.inception_4c_3x3_scale, \
            n.inception_4c_relu_3x3, n.inception_4c_5x5_reduce, n.inception_4c_5x5_reduce_bn, \
            n.inception_4c_5x5_reduce_scale, n.inception_4c_relu_5x5_reduce, n.inception_4c_5x5, n.inception_4c_5x5_bn, \
            n.inception_4c_5x5_scale, n.inception_4c_relu_5x5, n.inception_4c_pool, n.inception_4c_pool_proj, \
            n.inception_4c_pool_proj_bn, n.inception_4c_pool_proj_scale, n.inception_4c_relu_pool_proj, \
            n.inception_4c_output = \
            inception_bn(n.inception_4b_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=256,
                                                     conv_5x5_reduce=24, conv_5x5=64, pool_proj=64))
        n.inception_4d_1x1, n.inception_4d_1x1_bn, n.inception_4d_1x1_scale, n.inception_4d_relu_1x1, \
            n.inception_4d_3x3_reduce, n.inception_4d_3x3_reduce_bn, n.inception_4d_3x3_reduce_scale, \
            n.inception_4d_relu_3x3_reduce, n.inception_4d_3x3, n.inception_4d_3x3_bn, n.inception_4d_3x3_scale, \
            n.inception_4d_relu_3x3, n.inception_4d_5x5_reduce, n.inception_4d_5x5_reduce_bn, \
            n.inception_4d_5x5_reduce_scale, n.inception_4d_relu_5x5_reduce, n.inception_4d_5x5, n.inception_4d_5x5_bn, \
            n.inception_4d_5x5_scale, n.inception_4d_relu_5x5, n.inception_4d_pool, n.inception_4d_pool_proj, \
            n.inception_4d_pool_proj_bn, n.inception_4d_pool_proj_scale, n.inception_4d_relu_pool_proj, \
            n.inception_4d_output = \
            inception_bn(n.inception_4c_output, dict(conv_1x1=112, conv_3x3_reduce=144, conv_3x3=288,
                                                     conv_5x5_reduce=32, conv_5x5=64, pool_proj=64))
        # Auxiliary classifier 2 (after 4d), loss_weight 0.3.
        n.loss2_ave_pool = L.Pooling(n.inception_4d_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
        n.loss2_conv, n.loss2_conv_bn, n.loss2_conv_scale, n.loss2_relu_conv = \
            factorization_conv_bn_scale_relu(n.loss2_ave_pool, num_output=128, kernel_size=1)
        n.loss2_fc, n.loss2_relu_fc, n.loss2_drop_fc = \
            fc_relu_drop(n.loss2_conv, dict(num_output=1024, weight_type='xavier', weight_std=1,
                                            bias_type='constant', bias_value=0.2), dropout_ratio=0.7)
        n.loss2_classifier = L.InnerProduct(n.loss2_fc, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss2_loss = L.SoftmaxWithLoss(n.loss2_classifier, n.label, loss_weight=0.3)
        if phase != 'TRAIN':
            n.loss2_accuracy_top1 = L.Accuracy(n.loss2_classifier, n.label, include=dict(phase=1))
            n.loss2_accuracy_top5 = L.Accuracy(n.loss2_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))
        n.inception_4e_1x1, n.inception_4e_1x1_bn, n.inception_4e_1x1_scale, n.inception_4e_relu_1x1, \
            n.inception_4e_3x3_reduce, n.inception_4e_3x3_reduce_bn, n.inception_4e_3x3_reduce_scale, \
            n.inception_4e_relu_3x3_reduce, n.inception_4e_3x3, n.inception_4e_3x3_bn, n.inception_4e_3x3_scale, \
            n.inception_4e_relu_3x3, n.inception_4e_5x5_reduce, n.inception_4e_5x5_reduce_bn, \
            n.inception_4e_5x5_reduce_scale, n.inception_4e_relu_5x5_reduce, n.inception_4e_5x5, n.inception_4e_5x5_bn, \
            n.inception_4e_5x5_scale, n.inception_4e_relu_5x5, n.inception_4e_pool, n.inception_4e_pool_proj, \
            n.inception_4e_pool_proj_bn, n.inception_4e_pool_proj_scale, n.inception_4e_relu_pool_proj, \
            n.inception_4e_output = \
            inception_bn(n.inception_4d_output, dict(conv_1x1=256, conv_3x3_reduce=160, conv_3x3=320,
                                                     conv_5x5_reduce=32, conv_5x5=128, pool_proj=128))
        n.pool4_3x3_s2 = L.Pooling(n.inception_4e_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.inception_5a_1x1, n.inception_5a_1x1_bn, n.inception_5a_1x1_scale, n.inception_5a_relu_1x1, \
            n.inception_5a_3x3_reduce, n.inception_5a_3x3_reduce_bn, n.inception_5a_3x3_reduce_scale, \
            n.inception_5a_relu_3x3_reduce, n.inception_5a_3x3, n.inception_5a_3x3_bn, n.inception_5a_3x3_scale, \
            n.inception_5a_relu_3x3, n.inception_5a_5x5_reduce, n.inception_5a_5x5_reduce_bn, \
            n.inception_5a_5x5_reduce_scale, n.inception_5a_relu_5x5_reduce, n.inception_5a_5x5, n.inception_5a_5x5_bn, \
            n.inception_5a_5x5_scale, n.inception_5a_relu_5x5, n.inception_5a_pool, n.inception_5a_pool_proj, \
            n.inception_5a_pool_proj_bn, n.inception_5a_pool_proj_scale, n.inception_5a_relu_pool_proj, \
            n.inception_5a_output = \
            inception_bn(n.pool4_3x3_s2, dict(conv_1x1=256, conv_3x3_reduce=160, conv_3x3=320,
                                              conv_5x5_reduce=32, conv_5x5=128, pool_proj=128))
        n.inception_5b_1x1, n.inception_5b_1x1_bn, n.inception_5b_1x1_scale, n.inception_5b_relu_1x1, \
            n.inception_5b_3x3_reduce, n.inception_5b_3x3_reduce_bn, n.inception_5b_3x3_reduce_scale, \
            n.inception_5b_relu_3x3_reduce, n.inception_5b_3x3, n.inception_5b_3x3_bn, n.inception_5b_3x3_scale, \
            n.inception_5b_relu_3x3, n.inception_5b_5x5_reduce, n.inception_5b_5x5_reduce_bn, \
            n.inception_5b_5x5_reduce_scale, n.inception_5b_relu_5x5_reduce, n.inception_5b_5x5, n.inception_5b_5x5_bn, \
            n.inception_5b_5x5_scale, n.inception_5b_relu_5x5, n.inception_5b_pool, n.inception_5b_pool_proj, \
            n.inception_5b_pool_proj_bn, n.inception_5b_pool_proj_scale, n.inception_5b_relu_pool_proj, \
            n.inception_5b_output = \
            inception_bn(n.inception_5a_output, dict(conv_1x1=384, conv_3x3_reduce=192, conv_3x3=384,
                                                     conv_5x5_reduce=48, conv_5x5=128, pool_proj=128))
        n.pool5_7x7_s1 = L.Pooling(n.inception_5b_output, kernel_size=7, stride=1, pool=P.Pooling.AVE)
        n.pool5_drop_7x7_s1 = L.Dropout(n.pool5_7x7_s1, in_place=True,
                                        dropout_param=dict(dropout_ratio=0.4))
        n.loss3_classifier = L.InnerProduct(n.pool5_7x7_s1, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss3_loss = L.SoftmaxWithLoss(n.loss3_classifier, n.label, loss_weight=1)
        if phase != 'TRAIN':
            n.loss3_accuracy_top1 = L.Accuracy(n.loss3_classifier, n.label, include=dict(phase=1))
            n.loss3_accuracy_top5 = L.Accuracy(n.loss3_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))
        return n.to_proto()
| 32,045 | 75.665072 | 118 | py |
caffe-model | caffe-model-master/alexnet.py | import caffe
from caffe import layers as L
from caffe import params as P
def conv_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0, group=1):
    """Convolution (gaussian init, std 0.01) with an in-place ReLU.

    Returns the (conv, relu) tops; relu operates in place on conv.
    """
    learn_mult = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(bottom, num_output=num_output, kernel_size=kernel_size,
                         stride=stride, pad=pad, group=group, param=learn_mult,
                         weight_filler=dict(type='gaussian', std=0.01),
                         bias_filler=dict(type='constant', value=0))
    activation = L.ReLU(conv, in_place=True)
    return conv, activation
def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm, Scale and ReLU.

    Returns the four tops (conv, bn, scale, relu); the last three all
    transform the convolution blob in place.
    """
    conv = L.Convolution(bottom, num_output=num_output, kernel_size=kernel_size,
                         stride=stride, pad=pad,
                         param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                         weight_filler=dict(type='xavier', std=1),
                         bias_filler=dict(type='constant', value=0.2))
    batch_norm = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    activation = L.ReLU(conv, in_place=True)
    return conv, batch_norm, scale, activation
def fc_relu_drop(bottom, num_output=1024, dropout_ratio=0.5):
    """InnerProduct followed by in-place ReLU and Dropout.

    Returns the (fc, relu, drop) tops; relu and drop act in place.
    """
    weight_init = dict(type='xavier', std=1)
    bias_init = dict(type='constant', value=0.2)
    fc = L.InnerProduct(bottom, num_output=num_output,
                        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                        weight_filler=weight_init,
                        bias_filler=bias_init)
    activation = L.ReLU(fc, in_place=True)
    dropout = L.Dropout(fc, in_place=True,
                        dropout_param=dict(dropout_ratio=dropout_ratio))
    return fc, activation, dropout
def accuracy_top1_top5(bottom, label):
    """Top-1 and top-5 Accuracy layers, restricted to the TEST phase."""
    top1 = L.Accuracy(bottom, label, include=dict(phase=1))
    top5 = L.Accuracy(bottom, label, include=dict(phase=1),
                      accuracy_param=dict(top_k=5))
    return top1, top5
class AlexNet(object):
    """AlexNet prototxt generator: classic (LRN) and BatchNorm variants."""
    def __init__(self, lmdb_train, lmdb_test, num_output):
        # LMDB paths for the two phases and the classifier output width.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output
    def alexnet_proto(self, batch_size, phase='TRAIN'):
        """Return the classic AlexNet NetParameter (LRN, grouped convs)."""
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1, n.relu1 = conv_relu(n.data, num_output=96, kernel_size=11, stride=4)  # 96x55x55
        n.norm1 = L.LRN(n.conv1, local_size=5, alpha=0.0001, beta=0.75)
        n.pool1 = L.Pooling(n.norm1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 96x27x27
        n.conv2, n.relu2 = conv_relu(n.pool1, num_output=256, kernel_size=5, pad=2, group=2)  # 256x27x27
        n.norm2 = L.LRN(n.conv2, local_size=5, alpha=0.0001, beta=0.75)
        n.pool2 = L.Pooling(n.norm2, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 256x13x13
        n.conv3, n.relu3 = conv_relu(n.pool2, num_output=384, kernel_size=3, pad=1)  # 384x13x13
        n.conv4, n.relu4 = conv_relu(n.conv3, num_output=384, kernel_size=3, pad=1, group=2)  # 384x13x13
        n.conv5, n.relu5 = conv_relu(n.conv4, num_output=256, kernel_size=3, pad=1, group=2)  # 256x13x13
        n.pool5 = L.Pooling(n.conv5, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 256x6x6
        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5, num_output=4096)  # 4096x1x1
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6, num_output=4096)  # 4096x1x1
        n.fc8 = L.InnerProduct(n.fc7, num_output=self.classifier_num,
                               param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # Accuracy layers only exist in the TEST prototxt.
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.fc8, n.label)
        return n.to_proto()
    def alexnet_bn_proto(self, batch_size, phase='TRAIN'):
        """Return the BatchNorm variant (BN+Scale replaces LRN; no conv groups)."""
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            factorization_conv_bn_scale_relu(n.data, num_output=96, kernel_size=11, stride=4)  # 96x55x55
        n.pool1 = L.Pooling(n.conv1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 96x27x27
        n.conv2, n.conv2_bn, n.conv2_scale, n.conv2_relu = \
            factorization_conv_bn_scale_relu(n.pool1, num_output=256, kernel_size=5, pad=2)  # 256x27x27
        n.pool2 = L.Pooling(n.conv2, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 256x13x13
        n.conv3, n.conv3_bn, n.conv3_scale, n.conv3_relu = \
            factorization_conv_bn_scale_relu(n.pool2, num_output=384, kernel_size=3, pad=1)  # 384x13x13
        n.conv4, n.conv4_bn, n.conv4_scale, n.conv4_relu = \
            factorization_conv_bn_scale_relu(n.conv3, num_output=384, kernel_size=3, pad=1)  # 384x13x13
        n.conv5, n.conv5_bn, n.conv5_scale, n.conv5_relu = \
            factorization_conv_bn_scale_relu(n.conv4, num_output=256, kernel_size=3, pad=1)  # 256x13x13
        n.pool5 = L.Pooling(n.conv5, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 256x6x6
        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5, num_output=4096)  # 4096x1x1
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6, num_output=4096)  # 4096x1x1
        n.fc8 = L.InnerProduct(n.fc7, num_output=self.classifier_num,
                               param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        if phase == 'TRAIN':
            pass
        else:
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.fc8, n.label)
        return n.to_proto()
| 6,674 | 49.954198 | 117 | py |
caffe-model | caffe-model-master/seg/evaluation_seg.py | import sys
sys.path.append('/home/prmct/workspace/PSPNet-0120/python/')
import caffe
import cv2
import numpy as np
import datetime
# ---- runtime configuration -------------------------------------------------
gpu_mode = True
gpu_id = 3
# Input image directory, list of image ids, and output directory for PNGs.
data_root = '/home/prmct/Database/VOC_PASCAL/VOC2012_test/JPEGImages/'
val_file = 'test_205.txt'
save_root = './predict205_40000_ms/'
model_weights = 'psp_resnext101_32x4d_coco_sbd_iter_40000.caffemodel'
model_deploy = 'deploy_psp_resnext101_32x4d_merge_bn_scale.prototxt'
prob_layer = 'prob'  # output layer, normally Softmax
class_num = 21
base_size = 512
crop_size = 473
raw_scale = 57.5  # image scale factor, 1.0 or 128.0
# mean_value = np.array([104.008, 116.669, 122.675])
# mean_value = np.array([128, 128, 128])
mean_value = np.array([103.52, 116.28, 123.675])
scale_array = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]  # multi scale
# scale_array = [1.0]  # single scale
flip = True  # average predictions with the horizontally-flipped input
class_offset = 0
# Optional dense-CRF post-processing network.
crf = False
crf_deploy = '/home/prmct/Program/segmentation/deploy_crf.prototxt'
crf_factor = 4.0
# Select the Caffe device and load the segmentation net (and CRF net if enabled).
if gpu_mode:
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
else:
    caffe.set_mode_cpu()
net = caffe.Net(model_deploy, model_weights, caffe.TEST)
if crf:
    net_crf = caffe.Net(crf_deploy, caffe.TEST)
def eval_batch():
    """Run multi-scale segmentation over every image listed in val_file.

    For each image the network is evaluated at every ratio in scale_array,
    the per-scale score maps are resized back to the original resolution and
    averaged, optionally refined by the dense-CRF net, and the argmax label
    map (plus class_offset) is written to save_root as a PNG.
    """
    eval_images = []
    f = open(val_file, 'r')
    for i in f:
        eval_images.append(i.strip())
    skip_num = 0  # number of leading images to skip (manual resume support)
    eval_len = len(eval_images)
    start_time = datetime.datetime.now()
    for i in xrange(eval_len - skip_num):
        _img = cv2.imread(data_root + eval_images[i + skip_num] + '.jpg')
        h, w, d = _img.shape
        score_map = np.zeros((h, w, class_num), dtype=np.float32)
        for j in scale_array:
            # Resize so the long side equals base_size * scale (+1), keeping aspect.
            long_size = float(base_size) * j + 1
            ratio = long_size / max(h, w)
            new_size = (int(w * ratio), int(h * ratio))
            _scale = cv2.resize(_img, new_size)
            score_map += cv2.resize(scale_process(_scale), (w, h))
        score_map /= len(scale_array)
        if crf:
            # Feed the raw image plus averaged scores through the CRF net.
            tmp_data = np.asarray([_img.transpose(2, 0, 1)], dtype=np.float32)
            tmp_score = np.asarray([score_map.transpose(2, 0, 1)], dtype=np.float32)
            net_crf.blobs['data'].reshape(*tmp_data.shape)
            net_crf.blobs['data'].data[...] = tmp_data / raw_scale
            net_crf.blobs['data_dim'].data[...] = [[[h, w]]]
            net_crf.blobs['score'].reshape(*tmp_score.shape)
            net_crf.blobs['score'].data[...] = tmp_score * crf_factor
            net_crf.forward()
            score_map = net_crf.blobs[prob_layer].data[0].transpose(1, 2, 0)
        cv2.imwrite(save_root + eval_images[i + skip_num] + '.png', score_map.argmax(2) + class_offset)
        print 'Testing image: ' + str(i + 1) + '/' + str(eval_len) + ' ' + str(eval_images[i + skip_num])
    end_time = datetime.datetime.now()
    print '\nEvaluation process ends at: {}. \nTime cost is: {}. '.format(str(end_time), str(end_time - start_time))
    print '\n{} images has been tested. \nThe model is: {}'.format(str(eval_len), model_weights)
def scale_process(_scale):
    """Compute a per-pixel class score map for one resized image.

    If the image fits inside crop_size x crop_size it is padded and evaluated
    in a single forward pass; otherwise it is covered with overlapping
    crop_size tiles (stride = 2/3 of crop_size) whose scores are averaged
    per pixel. Returns an (h, w, class_num) float32 array.
    """
    sh, sw, sd = _scale.shape
    _scale = np.asarray(_scale, dtype=np.float32)
    long_size = max(sh, sw)
    short_size = min(sh, sw)
    if long_size <= crop_size:
        input_data = pad_img(_scale - mean_value)
        score = caffe_process(input_data)[:sh, :sw, :]
    else:
        stride_rate = 2.0 / 3
        stride = np.ceil(crop_size * stride_rate)
        _pad = _scale
        if short_size < crop_size:
            # Zero-pad in mean-subtracted space so every tile is full size.
            _pad = pad_img(_scale - mean_value) + mean_value
        ph, pw, pd = _pad.shape
        h_grid = int(np.ceil(float(ph - crop_size) / stride)) + 1
        w_grid = int(np.ceil(float(pw - crop_size) / stride)) + 1
        data_scale = np.zeros((ph, pw, class_num), dtype=np.float32)
        count_scale = np.zeros((ph, pw, class_num), dtype=np.float32)
        for grid_yidx in xrange(0, h_grid):
            for grid_xidx in xrange(0, w_grid):
                s_x = int(grid_xidx * stride)
                s_y = int(grid_yidx * stride)
                e_x = min(s_x + crop_size, pw)
                e_y = min(s_y + crop_size, ph)
                # Slide the last tile back inward so it stays full-sized.
                s_x = int(e_x - crop_size)
                s_y = int(e_y - crop_size)
                _sub = _pad[s_y:e_y, s_x:e_x, :]
                count_scale[s_y:e_y, s_x:e_x, :] += 1.0
                input_data = pad_img(_sub - mean_value)
                data_scale[s_y:e_y, s_x:e_x, :] += caffe_process(input_data)
        score = data_scale / count_scale
        score = score[:sh, :sw, :]
    return score
def pad_img(_scale, size=None):
    """Zero-pad an HxWxC image on the bottom/right so both spatial dims reach `size`.

    Args:
        _scale: (h, w, c) array.
        size: minimum side length; defaults to the module-level crop_size,
            preserving the original call signature.

    Returns:
        The input array unchanged when no padding is needed, otherwise a new
        float32 array of shape (max(h, size), max(w, size), c) with the image
        in the top-left corner and zeros elsewhere.
    """
    if size is None:
        size = crop_size
    sh, sw, sd = _scale.shape
    if sh >= size and sw >= size:
        return _scale
    # Single allocation covers both the height-only, width-only and both-dims
    # cases (the original padded height and width in two separate passes).
    _pad = np.zeros((max(sh, size), max(sw, size), sd), dtype=np.float32)
    _pad[:sh, :sw, :] = _scale
    return _pad
def caffe_process(_input):
    """Forward one HxWx3 crop through the global `net`.

    When `flip` is set, the horizontally-mirrored crop is also evaluated and
    its scores un-mirrored before summing; the result is the mean over the
    1 or 2 passes, shaped (h, w, class_num).
    """
    h, w, d = _input.shape
    _score = np.zeros((h, w, class_num), dtype=np.float32)
    if flip:
        # Mirrored pass: HWC -> CHW -> NCHW, scaled by raw_scale.
        _flip = _input[:, ::-1]
        _flip = _flip.transpose(2, 0, 1)
        _flip = _flip.reshape((1,) + _flip.shape)
        net.blobs['data'].reshape(*_flip.shape)
        net.blobs['data'].data[...] = _flip / raw_scale
        # net.blobs['data_dim'].data[...] = [[[h, w]]]
        net.forward()
        _score += net.blobs[prob_layer].data[0].transpose(1, 2, 0)[:, ::-1]
    # Straight pass over the unmodified crop.
    _input = _input.transpose(2, 0, 1)
    _input = _input.reshape((1,) + _input.shape)
    net.blobs['data'].reshape(*_input.shape)
    net.blobs['data'].data[...] = _input / raw_scale
    # net.blobs['data_dim'].data[...] = [[[h, w]]]
    net.forward()
    _score += net.blobs[prob_layer].data[0].transpose(1, 2, 0)
    return _score / int(flip + 1)
# Script entry point: run the full multi-scale segmentation evaluation.
if __name__ == '__main__':
    eval_batch()
| 5,801 | 34.595092 | 116 | py |
caffe-model | caffe-model-master/cls/evaluation_cls.py | import sys
sys.path.append('~/caffe-master-0116/python')
import numpy as np
import caffe
import cv2
import datetime
# ---- runtime configuration -------------------------------------------------
gpu_mode = True
gpu_id = 0
data_root = '~/Database/ILSVRC2012'
val_file = 'ILSVRC2012_val.txt'
# Timestamped log file for the final accuracy summary.
save_log = 'log{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
model_weights = 'resnet-v2/resnet101_v2.caffemodel'
model_deploy = 'resnet-v2/deploy_resnet101_v2.prototxt'
prob_layer = 'prob'
class_num = 1000
base_size = 256  # short size
crop_size = 224
# mean_value = np.array([128.0, 128.0, 128.0])  # BGR
mean_value = np.array([102.9801, 115.9465, 122.7717])  # BGR
# std = np.array([128.0, 128.0, 128.0])  # BGR
std = np.array([1.0, 1.0, 1.0])  # BGR
crop_num = 1  # 1 and others for center(single)-crop, 12 for mirror(12)-crop, 144 for multi(144)-crop
batch_size = 1
top_k = (1, 5)  # which top-k accuracies to track
# Select the Caffe device and load the classification network.
if gpu_mode:
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
else:
    caffe.set_mode_cpu()
net = caffe.Net(model_deploy, model_weights, caffe.TEST)
def eval_batch():
    """Evaluate top-k classification accuracy over the val_file image list.

    Each image is resized so its short side equals base_size, mean/std
    normalized, cropped according to crop_num (1, 12 or 144 crops), and the
    per-crop class scores are summed before ranking. Running accuracy is
    printed per image; a final summary goes to stdout and save_log.
    """
    eval_images = []
    ground_truth = []
    f = open(val_file, 'r')
    for i in f:
        eval_images.append(i.strip().split(' ')[0])
        ground_truth.append(int(i.strip().split(' ')[1]))
    f.close()
    skip_num = 0  # number of leading images to skip (manual resume support)
    eval_len = len(eval_images)
    accuracy = np.zeros(len(top_k))
    # eval_len = 100
    start_time = datetime.datetime.now()
    for i in xrange(eval_len - skip_num):
        _img = cv2.imread(data_root + eval_images[i + skip_num])
        # Resize so the short edge equals base_size, preserving aspect ratio.
        _img = cv2.resize(_img, (int(_img.shape[1] * base_size / min(_img.shape[:2])),
                                 int(_img.shape[0] * base_size / min(_img.shape[:2])))
                          )
        _img = image_preprocess(_img)
        score_vec = np.zeros(class_num, dtype=np.float32)
        crops = []
        if crop_num == 1:
            crops.append(center_crop(_img))
        elif crop_num == 12:
            crops.extend(mirror_crop(_img))
        elif crop_num == 144:
            crops.extend(multi_crop(_img))
        else:
            crops.append(center_crop(_img))
        # Run the crops through the net in batch_size chunks, summing scores.
        iter_num = int(len(crops) / batch_size)
        for j in xrange(iter_num):
            score_vec += caffe_process(np.asarray(crops, dtype=np.float32)[j*batch_size:(j+1)*batch_size])
        score_index = (-score_vec / len(crops)).argsort()
        print 'Testing image: ' + str(i + 1) + '/' + str(eval_len - skip_num) + ' ' + str(score_index[0]) + '/' + str(
            ground_truth[i + skip_num]),
        for j in xrange(len(top_k)):
            if ground_truth[i + skip_num] in score_index[:top_k[j]]:
                accuracy[j] += 1
            tmp_acc = float(accuracy[j]) / float(i + 1)
            if top_k[j] == 1:
                print '\ttop_' + str(top_k[j]) + ':' + str(tmp_acc),
            else:
                print 'top_' + str(top_k[j]) + ':' + str(tmp_acc)
    end_time = datetime.datetime.now()
    # Persist the final summary to save_log as well as stdout.
    w = open(save_log, 'w')
    s1 = 'Evaluation process ends at: {}. \nTime cost is: {}. '.format(str(end_time), str(end_time - start_time))
    s2 = '\nThe model is: {}. \nThe val file is: {}. \n{} images has been tested, crop_num is: {}, base_size is: {}, ' \
         'crop_size is: {}.'.format(model_weights, val_file, str(eval_len), str(crop_num), str(base_size), str(crop_size))
    s3 = '\nThe mean value is: ({}, {}, {}).'.format(str(mean_value[0]), str(mean_value[1]), str(mean_value[2]))
    s4 = ''
    for i in xrange(len(top_k)):
        _acc = float(accuracy[i]) / float(eval_len)
        s4 += '\nAccuracy of top_{} is: {}; correct num is {}.'.format(str(top_k[i]), str(_acc), str(int(accuracy[i])))
    print s1, s2, s3, s4
    w.write(s1 + s2 + s3 + s4)
    w.close()
def image_preprocess(img):
    """Normalize a BGR image: subtract per-channel mean_value, divide by per-channel std."""
    channels = cv2.split(img)
    normalized = [(c - m) / s for c, m, s in zip(channels, mean_value, std)]
    return cv2.merge(normalized)
def center_crop(img, size=None):  # single crop
    """Return the central size x size crop of img.

    Args:
        img: (h, w, c) array.
        size: crop side length; defaults to the module-level crop_size,
            preserving the original call signature.

    Returns:
        The centered crop view, or None when the image's short edge is
        smaller than `size` (original behavior, which callers rely on).
    """
    if size is None:
        size = crop_size
    short_edge = min(img.shape[:2])
    if short_edge < size:
        return
    yy = int((img.shape[0] - size) / 2)
    xx = int((img.shape[1] - size) / 2)
    return img[yy: yy + size, xx: xx + size]
def over_sample(img):
    """Return 6 crop_size views: 4 corner crops, the center crop and a full resize.

    Returns None when the image's short edge is smaller than crop_size.
    (mirror_crop applies this to the image and its mirror for 12 crops total.)
    """
    h, w = img.shape[:2]
    if min(h, w) < crop_size:
        return
    yy = int((h - crop_size) / 2)
    xx = int((w - crop_size) / 2)
    return [
        img[:crop_size, :crop_size],          # top-left
        img[-crop_size:, -crop_size:],        # bottom-right
        img[:crop_size, -crop_size:],         # top-right
        img[-crop_size:, :crop_size],         # bottom-left
        img[yy: yy + crop_size, xx: xx + crop_size],  # center
        cv2.resize(img, (crop_size, crop_size)),      # whole-image resize
    ]
def mirror_crop(img):
    """Resize to base_size x base_size, then take the 6 over_sample views of
    both the resized image and its horizontal mirror (12 crops total)."""
    resized = cv2.resize(img, (base_size, base_size))
    flipped = resized[:, ::-1]
    crops = []
    for view in (resized, flipped):
        crops.extend(over_sample(view))
    return crops
def multi_crop(img):  # 144(12*12) crops
    """Return 144 crops: 4 resize scales x 3 window positions x 12 over_sample
    views (6 of the window plus 6 of its horizontal mirror).

    NOTE(review): the cv2.resize target uses `/` on ints — relies on Python 2
    integer division; under Python 3 this would pass floats to cv2.resize.
    """
    crop_list = []
    size_list = [256, 288, 320, 352]  # crop_size: 224
    # size_list = [270, 300, 330, 360]  # crop_size: 235
    # size_list = [320, 352, 384, 416]  # crop_size: 299
    # size_list = [352, 384, 416, 448]  # crop_size: 320
    short_edge = min(img.shape[:2])
    for i in size_list:
        # Resize so the short edge equals the current scale i.
        img_resize = cv2.resize(img, (img.shape[1] * i / short_edge, img.shape[0] * i / short_edge))
        yy = int((img_resize.shape[0] - i) / 2)
        xx = int((img_resize.shape[1] - i) / 2)
        for j in xrange(3):
            # j = 0/1/2 slides the i x i window across the long dimension.
            left_center_right = img_resize[yy * j: yy * j + i, xx * j: xx * j + i]
            mirror = left_center_right[:, ::-1]
            crop_list.extend(over_sample(left_center_right))
            crop_list.extend(over_sample(mirror))
    return crop_list
def caffe_process(_input):
    """Forward a batch of HxWxC crops through the global net and return the
    class-score vector summed over the batch."""
    batch = _input.transpose(0, 3, 1, 2)  # NHWC -> NCHW for Caffe
    net.blobs['data'].reshape(*batch.shape)
    net.blobs['data'].data[...] = batch
    net.forward()
    return np.sum(net.blobs[prob_layer].data, axis=0)
# Script entry point: run the full classification accuracy evaluation.
if __name__ == '__main__':
    eval_batch()
| 5,974 | 35.212121 | 122 | py |
caffe-model | caffe-model-master/cls/inception-v3/inception_v3.py | import caffe
from caffe import layers as L
from caffe import params as P
def fc_relu_drop(bottom, num_output=1024, dropout_ratio=0.5):
    """InnerProduct layer followed by in-place ReLU and Dropout.

    Returns the (fc, relu, drop) layer triple so the caller can register
    each one on a NetSpec.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    fc = L.InnerProduct(
        bottom,
        num_output=num_output,
        param=lr_params,
        weight_filler=dict(type='xavier', std=1),
        bias_filler=dict(type='constant', value=0.2),
    )
    relu = L.ReLU(fc, in_place=True)
    drop = L.Dropout(fc, dropout_param=dict(dropout_ratio=dropout_ratio), in_place=True)
    return fc, relu, drop
def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Square Convolution -> BatchNorm -> Scale -> ReLU (BN/Scale/ReLU in place).

    Returns the four layers in that order.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def factorization_conv_mxn(bottom, num_output=64, kernel_h=1, kernel_w=7, stride=1, pad_h=3, pad_w=0):
    """Asymmetric (m x n) Convolution -> BatchNorm -> Scale -> ReLU.

    Used for the factorized 1xn / nx1 convolutions of Inception-v3.
    Returns the four layers in that order.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_h=kernel_h,
        kernel_w=kernel_w,
        stride=stride,
        pad_h=pad_h,
        pad_w=pad_w,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2),
    )
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def stem_v3_299x299(bottom):
    """Inception-v3 stem for 299x299 inputs.

    input:3x299x299
    output:192x35x35
    :param bottom: bottom layer
    :return: every layer created, in creation order, so the caller can
        attach them to a NetSpec by positional unpacking
    """
    conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=3, stride=2)  # 32x149x149
    conv2_3x3_s1, conv2_3x3_s1_bn, conv2_3x3_s1_scale, conv2_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv1_3x3_s2, num_output=32, kernel_size=3)  # 32x147x147
    conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv2_3x3_s1, num_output=64, kernel_size=3, pad=1)  # 64x147x147
    pool1_3x3_s2 = L.Pooling(conv3_3x3_s1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x73x73
    conv4_3x3_reduce, conv4_3x3_reduce_bn, conv4_3x3_reduce_scale, conv4_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(pool1_3x3_s2, num_output=80, kernel_size=1)  # 80x73x73
    conv4_3x3, conv4_3x3_bn, conv4_3x3_scale, conv4_3x3_relu = \
        factorization_conv_bn_scale_relu(conv4_3x3_reduce, num_output=192, kernel_size=3)  # 192x71x71
    pool2_3x3_s2 = L.Pooling(conv4_3x3, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 192x35x35
    # NOTE: callers unpack this tuple positionally; the order must not change.
    return conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_relu, conv2_3x3_s1, conv2_3x3_s1_bn, \
        conv2_3x3_s1_scale, conv2_3x3_s1_relu, conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu, \
        pool1_3x3_s2, conv4_3x3_reduce, conv4_3x3_reduce_bn, conv4_3x3_reduce_scale, conv4_3x3_reduce_relu, \
        conv4_3x3, conv4_3x3_bn, conv4_3x3_scale, conv4_3x3_relu, pool2_3x3_s2
def inception_v3_a(bottom, pool_proj_num_output=32):
    """Inception-v3 "A" block: 1x1, 5x5, double-3x3 and avg-pool branches.

    input:192or256or288x35x35
    output:256or288x35x35
    :param pool_proj_num_output: num_output of pool_proj (32 or 64)
    :param bottom: bottom layer
    :return: every layer created, in creation order (concat last)
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=1)  # 64x35x35
    conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=48, kernel_size=1)  # 48x35x35
    conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu = \
        factorization_conv_bn_scale_relu(conv_5x5_reduce, num_output=64, kernel_size=5, pad=2)  # 64x35x35
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, kernel_size=1, num_output=64)  # 64x35x35
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, kernel_size=3, num_output=96, pad=1)  # 96x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3, kernel_size=3, num_output=96, pad=1)  # 96x35x35
    pool = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 192x35x35
    pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu = \
        factorization_conv_bn_scale_relu(pool, kernel_size=1, num_output=pool_proj_num_output)  # 32x35x35
    concat = L.Concat(conv_1x1, conv_5x5, conv_3x3_2, pool_proj)  # 256or288(64+64+96+32or64)x35x35
    # NOTE: callers unpack this tuple positionally; the order must not change.
    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_5x5_reduce, conv_5x5_reduce_bn, \
        conv_5x5_reduce_scale, conv_5x5_reduce_relu, conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu, \
        conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, \
        conv_3x3_scale, conv_3x3_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, pool, \
        pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu, concat
def reduction_v3_a(bottom):
    """Inception-v3 grid-size reduction "A": max-pool, strided 3x3 and
    strided double-3x3 branches concatenated.

    input:288x35x35
    output:768x17x17
    :param bottom: bottom layer
    :return: every layer created, in creation order (concat last)
    """
    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 384x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, kernel_size=3, num_output=384, stride=2)  # 384x17x17
    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=1)  # 64x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=96, kernel_size=3, stride=2)  # 96x17x17
    concat = L.Concat(pool, conv_3x3, conv_3x3_3)  # 768(288+384+96)x17x17
    # NOTE: callers unpack this tuple positionally; the order must not change.
    return pool, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
        conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
        conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, concat
def inception_v3_b(bottom, outs=128):
    """Inception-v3 "B" block: factorized 7x7 convolutions (1x7 / 7x1 pairs).

    input:768x17x17
    output:768x17x17
    :param outs: channel width of the intermediate factorized convolutions
        (128/160/192 in the paper)
    :param bottom: bottom layer
    :return: every layer created, in creation order (concat last)
    """
    pool_ave = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 768x17x17
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool_ave, num_output=192, kernel_size=1)  # 192x17x17
    conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, conv_1x1_2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    # Branch: 1x1 reduce -> 1x7 -> 7x1 (factorized 7x7).
    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=outs, kernel_size=1)  # outsx17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=outs, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # outsx17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=192, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 192x17x17
    # Branch: 1x1 reduce -> 7x1 -> 1x7 -> 7x1 -> 1x7 (factorized double 7x7).
    conv_7x1_reduce, conv_7x1_reduce_bn, conv_7x1_reduce_scale, conv_7x1_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=outs, kernel_size=1)  # outsx17x17
    conv_7x1_2, conv_7x1_2_bn, conv_7x1_2_scale, conv_7x1_2_relu = \
        factorization_conv_mxn(conv_7x1_reduce, num_output=outs, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # outsx17x17
    conv_1x7_2, conv_1x7_2_bn, conv_1x7_2_scale, conv_1x7_2_relu = \
        factorization_conv_mxn(conv_7x1_2, num_output=outs, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # outsx17x17
    conv_7x1_3, conv_7x1_3_bn, conv_7x1_3_scale, conv_7x1_3_relu = \
        factorization_conv_mxn(conv_1x7_2, num_output=outs, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # outsx17x17
    conv_1x7_3, conv_1x7_3_bn, conv_1x7_3_scale, conv_1x7_3_relu = \
        factorization_conv_mxn(conv_7x1_3, num_output=192, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 192x17x17
    concat = L.Concat(conv_1x1_2, conv_7x1, conv_1x7_3, conv_1x1)  # 768(192+192+192+192)x17x17
    # NOTE: callers unpack this tuple positionally; the order must not change.
    return pool_ave, conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, \
        conv_1x1_2_relu, conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu, \
        conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, \
        conv_7x1_reduce, conv_7x1_reduce_bn, conv_7x1_reduce_scale, conv_7x1_reduce_relu, conv_7x1_2, conv_7x1_2_bn, \
        conv_7x1_2_scale, conv_7x1_2_relu, conv_1x7_2, conv_1x7_2_bn, conv_1x7_2_scale, conv_1x7_2_relu, conv_7x1_3, \
        conv_7x1_3_bn, conv_7x1_3_scale, conv_7x1_3_relu, conv_1x7_3, conv_1x7_3_bn, conv_1x7_3_scale, conv_1x7_3_relu, \
        concat
def reduction_v3_b(bottom):
    """Inception-v3 grid-size reduction "B": max-pool, strided 3x3 and
    factorized-7x7-then-strided-3x3 branches concatenated.

    input:768x17x17
    output:1280x8x8
    :param bottom: bottom layer
    :return: every layer created, in creation order (concat last)
    """
    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 768x8x8
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=320, kernel_size=3, stride=2)  # 320x8x8
    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=192, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 192x17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=192, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 192x17x17
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_7x1, num_output=192, kernel_size=3, stride=2)  # 192x8x8
    concat = L.Concat(pool, conv_3x3, conv_3x3_2)  # 1280(768+320+192)x8x8
    # NOTE: callers unpack this tuple positionally; the order must not change.
    return pool, conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, \
        conv_3x3_scale, conv_3x3_relu, conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu, \
        conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, \
        conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, concat
def inception_v3_c(bottom, pool=P.Pooling.AVE):
    """Inception-v3 "C" block: expanded 1x3/3x1 branches on the 8x8 grid.

    input:1280or2048x8x8
    output:2048x8x8
    :param pool: pooling type for the pooling branch (P.Pooling.AVE or .MAX)
    :param bottom: bottom layer
    :return: every layer created, in creation order (concat last)
    """
    # NOTE(review): the `pool` parameter is consumed as the pool= argument
    # below and then shadowed by the resulting pooling layer — intentional
    # but easy to misread.
    pool = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=pool)  # 1280or2048x8x8
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool, num_output=192, kernel_size=1)  # 192x8x8
    conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, conv_1x1_2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=320, kernel_size=1)  # 320x8x8
    # Branch: 1x1 reduce, then parallel 1x3 and 3x1 outputs.
    conv_1x3_reduce, conv_1x3_reduce_bn, conv_1x3_reduce_scale, conv_1x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=1)  # 384x8x8
    conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu = \
        factorization_conv_mxn(conv_1x3_reduce, num_output=384, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 384x8x8
    conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu = \
        factorization_conv_mxn(conv_1x3_reduce, num_output=384, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 384x8x8
    # Branch: 1x1 reduce -> 3x3, then parallel 1x3 and 3x1 outputs.
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=448, kernel_size=1)  # 448x8x8
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=384, kernel_size=3, pad=1)  # 384x8x8
    conv_1x3_2, conv_1x3_2_bn, conv_1x3_2_scale, conv_1x3_2_relu = \
        factorization_conv_mxn(conv_3x3, num_output=384, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 384x8x8
    conv_3x1_2, conv_3x1_2_bn, conv_3x1_2_scale, conv_3x1_2_relu = \
        factorization_conv_mxn(conv_3x3, num_output=384, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 384x8x8
    concat = L.Concat(conv_1x1_2, conv_1x3, conv_3x1, conv_1x3_2, conv_3x1_2, conv_1x1)  # 2048(192+320+384+384+384+384)x8x8
    # NOTE: callers unpack this tuple positionally; the order must not change.
    return pool, conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x1_2, conv_1x1_2_bn, conv_1x1_2_scale, \
        conv_1x1_2_relu, conv_1x3_reduce, conv_1x3_reduce_bn, conv_1x3_reduce_scale, conv_1x3_reduce_relu, conv_1x3, \
        conv_1x3_bn, conv_1x3_scale, conv_1x3_relu, conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu, \
        conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, \
        conv_3x3_scale, conv_3x3_relu, conv_1x3_2, conv_1x3_2_bn, conv_1x3_2_scale, conv_1x3_2_relu, conv_3x1_2, \
        conv_3x1_2_bn, conv_3x1_2_scale, conv_3x1_2_relu, concat
class InceptionV3(object):
    """Builds Caffe NetSpec definitions for an Inception-v3 classifier.

    Topology is assembled from helper builders defined earlier in this file
    (stem_v3_299x299, inception_v3_a/b/c, reduction_v3_a/b).  Each helper
    returns every intermediate layer handle; the long positional unpacks below
    attach each handle to the NetSpec under an explicit layer name, so the
    unpack order must exactly match the helper's return order.
    """

    def __init__(self, lmdb_train, lmdb_test, num_output):
        # LMDB paths for the train/test Data layers and the class count.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output

    def inception_v3_proto(self, batch_size, phase='TRAIN'):
        """Return the serialized net prototxt for the given phase.

        :param batch_size: batch size of the LMDB Data layer
        :param phase: 'TRAIN' enables mirroring and omits accuracy layers;
            any other value builds the test net with top-1/top-5 accuracy
        :return: the NetParameter protobuf (n.to_proto())
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        # 299x299 crops with per-channel mean subtraction (BGR order).
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=299, mean_value=[104, 117, 123], mirror=mirror))
        # stem
        # NOTE(review): in this unpack `conv4_relu_3x3_reduce` precedes
        # `conv4_3x3_reduce_scale`; confirm against stem_v3_299x299's return
        # order -- if it returns scale before relu the two layer names are
        # swapped (harmless to execution, confusing in the prototxt).
        n.conv1_3x3_s2, n.conv1_3x3_s2_bn, n.conv1_3x3_s2_scale, n.conv1_3x3_relu, n.conv2_3x3_s1, n.conv2_3x3_s1_bn, \
            n.conv2_3x3_s1_scale, n.conv2_3x3_relu, n.conv3_3x3_s1, n.conv3_3x3_s1_bn, n.conv3_3x3_s1_scale, n.conv3_3x3_relu, \
            n.pool1_3x3_s2, n.conv4_3x3_reduce, n.conv4_3x3_reduce_bn, n.conv4_relu_3x3_reduce, n.conv4_3x3_reduce_scale, \
            n.conv4_3x3, n.conv4_3x3_bn, n.conv4_3x3_scale, n.conv4_relu_3x3, n.pool2_3x3_s2 = \
            stem_v3_299x299(n.data)  # 192x35x35
        # 3 x inception_v3_a
        n.inception_a1_1x1, n.inception_a1_1x1_bn, n.inception_a1_1x1_scale, n.inception_a1_1x1_relu, n.inception_a1_5x5_reduce, \
            n.inception_a1_5x5_reduce_bn, n.inception_a1_5x5_reduce_scale, n.inception_a1_5x5_reduce_relu, n.inception_a1_5x5, \
            n.inception_a1_5x5_bn, n.inception_a1_5x5_scale, n.inception_a1_5x5_relu, n.inception_a1_3x3_reduce, \
            n.inception_a1_3x3_reduce_bn, n.inception_a1_3x3_reduce_scale, n.inception_a1_3x3_reduce_relu, n.inception_a1_3x3_1, \
            n.inception_a1_3x3_bn, n.inception_a1_3x3_scale, n.inception_a1_3x3_relu, n.inception_a1_3x3_2, \
            n.inception_a1_3x3_2_bn, n.inception_a1_3x3_2_scale, n.inception_a1_3x3_2_relu, n.inception_a1_pool, \
            n.inception_a1_pool_proj, n.inception_a1_pool_proj_bn, n.inception_a1_pool_proj_scale, n.inception_a1_pool_proj_relu, \
            n.inception_a1_output = \
            inception_v3_a(n.pool2_3x3_s2)  # 256x35x35
        n.inception_a2_1x1, n.inception_a2_1x1_bn, n.inception_a2_1x1_scale, n.inception_a2_1x1_relu, n.inception_a2_5x5_reduce, \
            n.inception_a2_5x5_reduce_bn, n.inception_a2_5x5_reduce_scale, n.inception_a2_5x5_reduce_relu, n.inception_a2_5x5, \
            n.inception_a2_5x5_bn, n.inception_a2_5x5_scale, n.inception_a2_5x5_relu, n.inception_a2_3x3_reduce, \
            n.inception_a2_3x3_reduce_bn, n.inception_a2_3x3_reduce_scale, n.inception_a2_3x3_reduce_relu, n.inception_a2_3x3_1, \
            n.inception_a2_3x3_bn, n.inception_a2_3x3_scale, n.inception_a2_3x3_relu, n.inception_a2_3x3_2, \
            n.inception_a2_3x3_2_bn, n.inception_a2_3x3_2_scale, n.inception_a2_3x3_2_relu, n.inception_a2_pool, \
            n.inception_a2_pool_proj, n.inception_a2_pool_proj_bn, n.inception_a2_pool_proj_scale, n.inception_a2_pool_proj_relu, \
            n.inception_a2_output = \
            inception_v3_a(n.inception_a1_output, pool_proj_num_output=64)  # 288x35x35
        n.inception_a3_1x1, n.inception_a3_1x1_bn, n.inception_a3_1x1_scale, n.inception_a3_1x1_relu, n.inception_a3_5x5_reduce, \
            n.inception_a3_5x5_reduce_bn, n.inception_a3_5x5_reduce_scale, n.inception_a3_5x5_reduce_relu, n.inception_a3_5x5, \
            n.inception_a3_5x5_bn, n.inception_a3_5x5_scale, n.inception_a3_5x5_relu, n.inception_a3_3x3_reduce, \
            n.inception_a3_3x3_reduce_bn, n.inception_a3_3x3_reduce_scale, n.inception_a3_3x3_reduce_relu, n.inception_a3_3x3_1, \
            n.inception_a3_3x3_bn, n.inception_a3_3x3_scale, n.inception_a3_3x3_relu, n.inception_a3_3x3_2, \
            n.inception_a3_3x3_2_bn, n.inception_a3_3x3_2_scale, n.inception_a3_3x3_2_relu, n.inception_a3_pool, \
            n.inception_a3_pool_proj, n.inception_a3_pool_proj_bn, n.inception_a3_pool_proj_scale, n.inception_a3_pool_proj_relu, \
            n.inception_a3_output = \
            inception_v3_a(n.inception_a2_output, pool_proj_num_output=64)  # 288x35x35
        # reduction_v3_a
        n.reduction_a_pool, n.reduction_a_3x3, n.reduction_a_3x3_bn, n.reduction_a_3x3_scale, n.reduction_a_3x3_relu, \
            n.reduction_a_3x3_2_reduce, n.reduction_a_3x3_2_reduce_bn, n.reduction_a_3x3_2_reduce_scale, n.reduction_a_3x3_2_reduce_relu, \
            n.reduction_a_3x3_2, n.reduction_a_3x3_2_bn, n.reduction_a_3x3_2_scale, n.reduction_a_3x3_2_relu, n.reduction_a_3x3_3, \
            n.reduction_a_3x3_3_bn, n.reduction_a_3x3_3_scale, n.reduction_a_3x3_3_relu, n.reduction_a_concat = \
            reduction_v3_a(n.inception_a3_output)  # 768x17x17
        # 4 x inception_v3_b
        n.inception_b1_pool_ave, n.inception_b1_1x1, n.inception_b1_1x1_bn, n.inception_b1_1x1_scale, n.inception_b1_1x1_relu, \
            n.inception_b1_1x1_2, n.inception_b1_1x1_2_bn, n.inception_b1_1x1_2_scale, n.inception_b1_1x1_2_relu, \
            n.inception_b1_1x7_reduce, n.inception_b1_1x7_reduce_bn, n.inception_b1_1x7_reduce_scale, n.inception_b1_1x7_reduce_relu, \
            n.inception_b1_1x7, n.inception_b1_1x7_bn, n.inception_b1_1x7_scale, n.inception_b1_1x7_relu, n.inception_b1_7x1, \
            n.inception_b1_7x1_bn, n.inception_b1_7x1_scale, n.inception_b1_7x1_relu, n.inception_b1_7x1_reduce, n.inception_b1_7x1_reduce_bn, \
            n.inception_b1_7x1_reduce_scale, n.inception_b1_7x1_reduce_relu, n.inception_b1_7x1_2, n.inception_b1_7x1_2_bn, \
            n.inception_b1_7x1_2_scale, n.inception_b1_7x1_2_relu, n.inception_b1_1x7_2, n.inception_b1_1x7_2_bn, n.inception_b1_1x7_2_scale, \
            n.inception_b1_1x7_2_relu, n.inception_b1_7x1_3, n.inception_b1_7x1_3_bn, n.inception_b1_7x1_3_scale, n.inception_b1_7x1_3_relu, \
            n.inception_b1_1x7_3, n.inception_b1_1x7_3_bn, n.inception_b1_1x7_3_scale, n.inception_b1_1x7_3_relu, n.inception_b1_concat = \
            inception_v3_b(n.reduction_a_concat, outs=128)  # 768x17x17
        n.inception_b2_pool_ave, n.inception_b2_1x1, n.inception_b2_1x1_bn, n.inception_b2_1x1_scale, n.inception_b2_1x1_relu, \
            n.inception_b2_1x1_2, n.inception_b2_1x1_2_bn, n.inception_b2_1x1_2_scale, n.inception_b2_1x1_2_relu, \
            n.inception_b2_1x7_reduce, n.inception_b2_1x7_reduce_bn, n.inception_b2_1x7_reduce_scale, n.inception_b2_1x7_reduce_relu, \
            n.inception_b2_1x7, n.inception_b2_1x7_bn, n.inception_b2_1x7_scale, n.inception_b2_1x7_relu, n.inception_b2_7x1, \
            n.inception_b2_7x1_bn, n.inception_b2_7x1_scale, n.inception_b2_7x1_relu, n.inception_b2_7x1_reduce, n.inception_b2_7x1_reduce_bn, \
            n.inception_b2_7x1_reduce_scale, n.inception_b2_7x1_reduce_relu, n.inception_b2_7x1_2, n.inception_b2_7x1_2_bn, \
            n.inception_b2_7x1_2_scale, n.inception_b2_7x1_2_relu, n.inception_b2_1x7_2, n.inception_b2_1x7_2_bn, n.inception_b2_1x7_2_scale, \
            n.inception_b2_1x7_2_relu, n.inception_b2_7x1_3, n.inception_b2_7x1_3_bn, n.inception_b2_7x1_3_scale, n.inception_b2_7x1_3_relu, \
            n.inception_b2_1x7_3, n.inception_b2_1x7_3_bn, n.inception_b2_1x7_3_scale, n.inception_b2_1x7_3_relu, n.inception_b2_concat = \
            inception_v3_b(n.inception_b1_concat, outs=160)  # 768x17x17
        n.inception_b3_pool_ave, n.inception_b3_1x1, n.inception_b3_1x1_bn, n.inception_b3_1x1_scale, n.inception_b3_1x1_relu, \
            n.inception_b3_1x1_2, n.inception_b3_1x1_2_bn, n.inception_b3_1x1_2_scale, n.inception_b3_1x1_2_relu, \
            n.inception_b3_1x7_reduce, n.inception_b3_1x7_reduce_bn, n.inception_b3_1x7_reduce_scale, n.inception_b3_1x7_reduce_relu, \
            n.inception_b3_1x7, n.inception_b3_1x7_bn, n.inception_b3_1x7_scale, n.inception_b3_1x7_relu, n.inception_b3_7x1, \
            n.inception_b3_7x1_bn, n.inception_b3_7x1_scale, n.inception_b3_7x1_relu, n.inception_b3_7x1_reduce, n.inception_b3_7x1_reduce_bn, \
            n.inception_b3_7x1_reduce_scale, n.inception_b3_7x1_reduce_relu, n.inception_b3_7x1_2, n.inception_b3_7x1_2_bn, \
            n.inception_b3_7x1_2_scale, n.inception_b3_7x1_2_relu, n.inception_b3_1x7_2, n.inception_b3_1x7_2_bn, n.inception_b3_1x7_2_scale, \
            n.inception_b3_1x7_2_relu, n.inception_b3_7x1_3, n.inception_b3_7x1_3_bn, n.inception_b3_7x1_3_scale, n.inception_b3_7x1_3_relu, \
            n.inception_b3_1x7_3, n.inception_b3_1x7_3_bn, n.inception_b3_1x7_3_scale, n.inception_b3_1x7_3_relu, n.inception_b3_concat = \
            inception_v3_b(n.inception_b2_concat, outs=160)  # 768x17x17
        n.inception_b4_pool_ave, n.inception_b4_1x1, n.inception_b4_1x1_bn, n.inception_b4_1x1_scale, n.inception_b4_1x1_relu, \
            n.inception_b4_1x1_2, n.inception_b4_1x1_2_bn, n.inception_b4_1x1_2_scale, n.inception_b4_1x1_2_relu, \
            n.inception_b4_1x7_reduce, n.inception_b4_1x7_reduce_bn, n.inception_b4_1x7_reduce_scale, n.inception_b4_1x7_reduce_relu, \
            n.inception_b4_1x7, n.inception_b4_1x7_bn, n.inception_b4_1x7_scale, n.inception_b4_1x7_relu, n.inception_b4_7x1, \
            n.inception_b4_7x1_bn, n.inception_b4_7x1_scale, n.inception_b4_7x1_relu, n.inception_b4_7x1_reduce, n.inception_b4_7x1_reduce_bn, \
            n.inception_b4_7x1_reduce_scale, n.inception_b4_7x1_reduce_relu, n.inception_b4_7x1_2, n.inception_b4_7x1_2_bn, \
            n.inception_b4_7x1_2_scale, n.inception_b4_7x1_2_relu, n.inception_b4_1x7_2, n.inception_b4_1x7_2_bn, n.inception_b4_1x7_2_scale, \
            n.inception_b4_1x7_2_relu, n.inception_b4_7x1_3, n.inception_b4_7x1_3_bn, n.inception_b4_7x1_3_scale, n.inception_b4_7x1_3_relu, \
            n.inception_b4_1x7_3, n.inception_b4_1x7_3_bn, n.inception_b4_1x7_3_scale, n.inception_b4_1x7_3_relu, n.inception_b4_concat = \
            inception_v3_b(n.inception_b3_concat, outs=192)  # 768x17x17
        # loss 1 -- auxiliary classifier head on the 17x17 grid, weighted 0.4.
        n.auxiliary_loss_ave_pool = L.Pooling(n.inception_b4_concat, kernel_size=5, stride=3,
                                              pool=P.Pooling.AVE)  # 768x5x5
        n.auxiliary_loss_conv, n.auxiliary_loss_conv_bn, n.auxiliary_loss_conv_scale, n.auxiliary_loss_relu_conv = \
            factorization_conv_bn_scale_relu(n.auxiliary_loss_ave_pool, num_output=128, kernel_size=1)  # 128x1x1
        n.auxiliary_loss_fc = L.InnerProduct(n.auxiliary_loss_conv, num_output=768,
                                             param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                             weight_filler=dict(type='xavier', std=0.01),
                                             bias_filler=dict(type='constant', value=0))
        n.auxiliary_loss_fc_relu = L.ReLU(n.auxiliary_loss_fc, in_place=True)
        n.auxiliary_loss_classifier = L.InnerProduct(n.auxiliary_loss_fc, num_output=self.classifier_num,
                                                     param=[dict(lr_mult=1, decay_mult=1),
                                                            dict(lr_mult=2, decay_mult=0)],
                                                     weight_filler=dict(type='xavier'),
                                                     bias_filler=dict(type='constant', value=0))
        n.auxiliary_loss = L.SoftmaxWithLoss(n.auxiliary_loss_classifier, n.label, loss_weight=0.4)
        # reduction_v3_b
        n.reduction_b_pool, n.reduction_b_3x3_reduce, n.reduction_b_3x3_reduce_bn, n.reduction_b_3x3_reduce_scale, \
            n.reduction_b_3x3_reduce_relu, n.reduction_b_3x3, n.reduction_b_3x3_bn, n.reduction_b_3x3_scale, n.reduction_b_3x3_relu, \
            n.reduction_b_1x7_reduce, n.reduction_b_1x7_reduce_bn, n.reduction_b_1x7_reduce_scale, n.reduction_b_1x7_reduce_relu, \
            n.reduction_b_1x7, n.reduction_b_1x7_bn, n.reduction_b_1x7_scale, n.reduction_b_1x7_relu, n.reduction_b_7x1, \
            n.reduction_b_7x1_bn, n.reduction_b_7x1_scale, n.reduction_b_7x1_relu, n.reduction_b_3x3_2, n.reduction_b_3x3_2_bn, \
            n.reduction_b_3x3_2_scale, n.reduction_b_3x3_2_relu, n.reduction_b_concat = \
            reduction_v3_b(n.inception_b4_concat)  # 1280x8x8
        # 2 x inception_v3_c
        n.inception_c1_pool, n.inception_c1_1x1, n.inception_c1_1x1_bn, n.inception_c1_1x1_scale, n.inception_c1_1x1_relu, \
            n.inception_c1_1x1_2, n.inception_c1_1x1_2_bn, n.inception_c1_1x1_2_scale, n.inception_c1_1x1_2_relu, \
            n.inception_c1_1x3_reduce, n.inception_c1_1x3_reduce_bn, n.inception_c1_1x3_reduce_scale, n.inception_c1_1x3_reduce_relu, \
            n.inception_c1_1x3, n.inception_c1_1x3_bn, n.inception_c1_1x3_scale, n.inception_c1_1x3_relu, n.inception_c1_3x1, \
            n.inception_c1_3x1_bn, n.inception_c1_3x1_scale, n.inception_c1_3x1_relu, n.inception_c1_3x3_reduce, \
            n.inception_c1_3x3_reduce_bn, n.inception_c1_3x3_reduce_scale, n.inception_c1_3x3_reduce_relu, n.inception_c1_3x3, \
            n.inception_c1_3x3_bn, n.inception_c1_3x3_scale, n.inception_c1_3x3_relu, n.inception_c1_1x3_2, n.inception_c1_1x3_2_bn, \
            n.inception_c1_1x3_2_scale, n.inception_c1_1x3_2_relu, n.inception_c1_3x1_2, n.inception_c1_3x1_2_bn, n.inception_c1_3x1_2_scale, \
            n.inception_c1_3x1_2_relu, n.inception_c1_concat = \
            inception_v3_c(n.reduction_b_concat)  # 2048x8x8
        n.inception_c2_pool, n.inception_c2_1x1, n.inception_c2_1x1_bn, n.inception_c2_1x1_scale, n.inception_c2_1x1_relu, \
            n.inception_c2_1x1_2, n.inception_c2_1x1_2_bn, n.inception_c2_1x1_2_scale, n.inception_c2_1x1_2_relu, \
            n.inception_c2_1x3_reduce, n.inception_c2_1x3_reduce_bn, n.inception_c2_1x3_reduce_scale, n.inception_c2_1x3_reduce_relu, \
            n.inception_c2_1x3, n.inception_c2_1x3_bn, n.inception_c2_1x3_scale, n.inception_c2_1x3_relu, n.inception_c2_3x1, \
            n.inception_c2_3x1_bn, n.inception_c2_3x1_scale, n.inception_c2_3x1_relu, n.inception_c2_3x3_reduce, \
            n.inception_c2_3x3_reduce_bn, n.inception_c2_3x3_reduce_scale, n.inception_c2_3x3_reduce_relu, n.inception_c2_3x3, \
            n.inception_c2_3x3_bn, n.inception_c2_3x3_scale, n.inception_c2_3x3_relu, n.inception_c2_1x3_2, n.inception_c2_1x3_2_bn, \
            n.inception_c2_1x3_2_scale, n.inception_c2_1x3_2_relu, n.inception_c2_3x1_2, n.inception_c2_3x1_2_bn, n.inception_c2_3x1_2_scale, \
            n.inception_c2_3x1_2_relu, n.inception_c2_concat = \
            inception_v3_c(n.inception_c1_concat, pool=P.Pooling.MAX)  # 2048x8x8
        # loss 2 -- main classifier: global average pool, dropout, FC, softmax.
        n.pool_8x8_s1 = L.Pooling(n.inception_c2_concat, kernel_size=8, pool=P.Pooling.AVE)
        n.pool_8x8_s1_drop = L.Dropout(n.pool_8x8_s1, dropout_param=dict(dropout_ratio=0.2))
        n.classifier = L.InnerProduct(n.pool_8x8_s1_drop, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # Test-phase-only accuracy layers (phase=1 is caffe.TEST).
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 29,733 | 70.995157 | 140 | py |
caffe-model | caffe-model-master/cls/resnet-v1/resnet_v1.py | import caffe
from caffe import layers as L
from caffe import params as P
def conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm, Scale and ReLU.

    Returns all four layer handles so the caller can register each one on the
    NetSpec under its own name.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    # BN/Scale/ReLU all operate in place on the convolution output blob.
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def conv_bn_scale(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm and Scale (no activation).

    Used for branches whose ReLU is applied after the element-wise residual
    sum rather than directly after the convolution.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2),
    )
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    return conv, bn, scale
def eltwize_relu(bottom1, bottom2):
    """Element-wise SUM (operation=1) of two blobs, followed by in-place ReLU."""
    # NOTE: the misspelling "eltwize" is kept -- the name is referenced by
    # the exec template strings below.
    summed = L.Eltwise(bottom1, bottom2, eltwise_param=dict(operation=1))
    activated = L.ReLU(summed, in_place=True)
    return summed, activated
def residual_branch(bottom, base_output=64):
    """Identity-shortcut bottleneck residual unit (1x1 -> 3x3 -> 1x1).

    input:4*base_output x n x n
    output:4*base_output x n x n
    :param base_output: base num_output of branch2
    :param bottom: bottom layer
    :return: layers
    """
    reduce_, reduce_bn, reduce_scale, reduce_relu = conv_bn_scale_relu(
        bottom, num_output=base_output, kernel_size=1)  # base_output x n x n
    mid, mid_bn, mid_scale, mid_relu = conv_bn_scale_relu(
        reduce_, num_output=base_output, kernel_size=3, pad=1)  # base_output x n x n
    expand, expand_bn, expand_scale = conv_bn_scale(
        mid, num_output=4 * base_output, kernel_size=1)  # 4*base_output x n x n
    out, out_relu = eltwize_relu(bottom, expand)  # 4*base_output x n x n
    return (reduce_, reduce_bn, reduce_scale, reduce_relu,
            mid, mid_bn, mid_scale, mid_relu,
            expand, expand_bn, expand_scale,
            out, out_relu)
def residual_branch_shortcut(bottom, stride=2, base_output=64):
    """Bottleneck residual unit with a projection-convolution shortcut.

    The shortcut (branch1) projects the input to 4*base_output channels,
    optionally downsampling via `stride`; branch2 is the usual
    1x1 -> 3x3 -> 1x1 stack.  Both are summed and ReLU'd.
    :param stride: stride
    :param base_output: base num_output of branch2
    :param bottom: bottom layer
    :return: layers
    """
    shortcut, shortcut_bn, shortcut_scale = conv_bn_scale(
        bottom, num_output=4 * base_output, kernel_size=1, stride=stride)
    reduce_, reduce_bn, reduce_scale, reduce_relu = conv_bn_scale_relu(
        bottom, num_output=base_output, kernel_size=1, stride=stride)
    mid, mid_bn, mid_scale, mid_relu = conv_bn_scale_relu(
        reduce_, num_output=base_output, kernel_size=3, pad=1)
    expand, expand_bn, expand_scale = conv_bn_scale(
        mid, num_output=4 * base_output, kernel_size=1)
    out, out_relu = eltwize_relu(shortcut, expand)  # 4*base_output x n x n
    return (shortcut, shortcut_bn, shortcut_scale,
            reduce_, reduce_bn, reduce_scale, reduce_relu,
            mid, mid_bn, mid_scale, mid_relu,
            expand, expand_bn, expand_scale,
            out, out_relu)
# Template statement expanded via exec in ResNet.resnet_layers_proto: builds
# the first (projection-shortcut) unit of a stage.  The (stage)/(bottom)/
# (stride)/(num) placeholders are filled in with str.replace before exec; the
# backslashes are line-continuation escapes *inside* the string literal.
branch_shortcut_string = 'n.res(stage)a_branch1, n.res(stage)a_branch1_bn, n.res(stage)a_branch1_scale, \
n.res(stage)a_branch2a, n.res(stage)a_branch2a_bn, n.res(stage)a_branch2a_scale, n.res(stage)a_branch2a_relu, \
n.res(stage)a_branch2b, n.res(stage)a_branch2b_bn, n.res(stage)a_branch2b_scale, n.res(stage)a_branch2b_relu, \
n.res(stage)a_branch2c, n.res(stage)a_branch2c_bn, n.res(stage)a_branch2c_scale, n.res(stage)a, n.res(stage)a_relu = \
residual_branch_shortcut((bottom), stride=(stride), base_output=(num))'
# Template statement expanded via exec in ResNet.resnet_layers_proto: builds
# the non-first (identity-shortcut) units of a stage.  Placeholders
# (stage)/(order)/(bottom)/(num) are substituted with str.replace before exec.
branch_string = 'n.res(stage)b(order)_branch2a, n.res(stage)b(order)_branch2a_bn, n.res(stage)b(order)_branch2a_scale, \
n.res(stage)b(order)_branch2a_relu, n.res(stage)b(order)_branch2b, n.res(stage)b(order)_branch2b_bn, \
n.res(stage)b(order)_branch2b_scale, n.res(stage)b(order)_branch2b_relu, n.res(stage)b(order)_branch2c, \
n.res(stage)b(order)_branch2c_bn, n.res(stage)b(order)_branch2c_scale, n.res(stage)b(order), n.res(stage)b(order)_relu = \
residual_branch((bottom), base_output=(num))'
class ResNet(object):
    """Builds Caffe NetSpec definitions for bottleneck ResNet-v1 classifiers.

    NOTE: this code is Python 2 only -- it uses `xrange` and the `exec`
    *statement* form to expand the layer-name template strings above.
    """

    def __init__(self, lmdb_train, lmdb_test, num_output):
        # LMDB paths for the train/test Data layers and the class count.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output

    def resnet_layers_proto(self, batch_size, phase='TRAIN', stages=(3, 4, 6, 3)):
        """
        :param batch_size: the batch_size of train and test phase
        :param phase: TRAIN or TEST
        :param stages: the num of layers = 2 + 3*sum(stages), layers would better be chosen from [50, 101, 152]
                       {every stage is composed of 1 residual_branch_shortcut module and stage[i]-1 residual_branch
                       modules, each module consists of 3 conv layers}
                       (3, 4, 6, 3) for 50 layers; (3, 4, 23, 3) for 101 layers; (3, 8, 36, 3) for 152 layers
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        # 224x224 crops with per-channel mean subtraction (BGR order).
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            conv_bn_scale_relu(n.data, num_output=64, kernel_size=7, stride=2, pad=3)  # 64x112x112
        n.pool1 = L.Pooling(n.conv1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x56x56
        for num in xrange(len(stages)):  # num = 0, 1, 2, 3
            for i in xrange(stages[num]):
                if i == 0:
                    # First unit of the stage: projection shortcut.  Its input
                    # is pool1 for stage 2, else the last unit of the previous
                    # stage.
                    stage_string = branch_shortcut_string
                    bottom_string = ['n.pool1', 'n.res2b%s' % str(stages[0] - 1), 'n.res3b%s' % str(stages[1] - 1),
                                     'n.res4b%s' % str(stages[2] - 1)][num]
                else:
                    stage_string = branch_string
                    if i == 1:
                        bottom_string = 'n.res%sa' % str(num + 2)
                    else:
                        bottom_string = 'n.res%sb%s' % (str(num + 2), str(i - 1))
                # Fill the template placeholders and execute the generated
                # assignment.  Stage 2 uses stride 1, later stages stride 2;
                # base width doubles per stage (64, 128, 256, 512).
                exec (stage_string.replace('(stage)', str(num + 2)).replace('(bottom)', bottom_string).
                      replace('(num)', str(2 ** num * 64)).replace('(order)', str(i)).
                      replace('(stride)', str(int(num > 0) + 1)))
        # Global average pool over the last stage's output (Python 2 exec
        # statement).
        exec 'n.pool5 = L.Pooling((bottom), pool=P.Pooling.AVE, global_pooling=True)'.\
            replace('(bottom)', 'n.res5b%s' % str(stages[3] - 1))
        n.classifier = L.InnerProduct(n.pool5, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # Test-phase-only accuracy layers (phase=1 is caffe.TEST).
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 8,064 | 50.369427 | 130 | py |
caffe-model | caffe-model-master/cls/inception-resnet-v2/inception_resnet.py | import caffe
from caffe import layers as L
from caffe import params as P
def fc_relu_drop(bottom, num_output=1024, dropout_ratio=0.5):
    """InnerProduct layer with in-place ReLU and Dropout attached."""
    fc = L.InnerProduct(
        bottom,
        num_output=num_output,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='xavier', std=1),
        bias_filler=dict(type='constant', value=0.2),
    )
    relu = L.ReLU(fc, in_place=True)
    drop = L.Dropout(fc, in_place=True,
                     dropout_param=dict(dropout_ratio=dropout_ratio))
    return fc, relu, drop
def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Square convolution followed by in-place BatchNorm, Scale and ReLU."""
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2),
    )
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def factorization_conv_bn_scale(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Square convolution with in-place BatchNorm and Scale, no activation.

    Used for the residual "up" projections whose ReLU comes after the
    element-wise sum.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2),
    )
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    return conv, bn, scale
def factorization_conv_mxn(bottom, num_output=64, kernel_h=1, kernel_w=7, stride=1, pad_h=3, pad_w=0):
    """Asymmetric (m x n) convolution with in-place BatchNorm, Scale and ReLU.

    Implements the factorized 1xN / Nx1 convolutions used by the inception
    blocks; height/width kernel and padding are specified independently.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_h=kernel_h,
        kernel_w=kernel_w,
        stride=stride,
        pad_h=pad_h,
        pad_w=pad_w,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2),
    )
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def eltwise_relu(bottom1, bottom2):
    """Element-wise SUM (operation=1) of two blobs, followed by in-place ReLU."""
    summed = L.Eltwise(bottom1, bottom2, eltwise_param=dict(operation=1))
    activated = L.ReLU(summed, in_place=True)
    return summed, activated
def stem_resnet_v2_299x299(bottom):
    """Inception-ResNet-v2 stem: plain convolutions/pools, then a four-branch
    mixed block whose outputs are concatenated.

    input:3x299x299
    output:320x35x35
    :param bottom: bottom layer
    :return: layers (every intermediate handle, in creation order, ending with
        the concat; callers unpack positionally)
    """
    conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=3, stride=2)  # 32x149x149
    conv2_3x3_s1, conv2_3x3_s1_bn, conv2_3x3_s1_scale, conv2_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv1_3x3_s2, num_output=32, kernel_size=3)  # 32x147x147
    conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv2_3x3_s1, num_output=64, kernel_size=3, pad=1)  # 64x147x147
    pool1_3x3_s2 = L.Pooling(conv3_3x3_s1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x73x73
    conv4_3x3_reduce, conv4_3x3_reduce_bn, conv4_3x3_reduce_scale, conv4_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(pool1_3x3_s2, num_output=80, kernel_size=1)  # 80x73x73
    conv4_3x3, conv4_3x3_bn, conv4_3x3_scale, conv4_3x3_relu = \
        factorization_conv_bn_scale_relu(conv4_3x3_reduce, num_output=192, kernel_size=3)  # 192x71x71
    pool2_3x3_s2 = L.Pooling(conv4_3x3, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 192x35x35
    # Four parallel branches off pool2: 1x1, 1x1->5x5, 1x1->3x3->3x3, avgpool->1x1.
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(pool2_3x3_s2, num_output=96, kernel_size=1)  # 96x35x35
    conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu = \
        factorization_conv_bn_scale_relu(pool2_3x3_s2, num_output=48, kernel_size=1)  # 48x35x35
    conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu = \
        factorization_conv_bn_scale_relu(conv_5x5_reduce, num_output=64, kernel_size=5, pad=2)  # 64x35x35
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(pool2_3x3_s2, num_output=64, kernel_size=1)  # 64x35x35
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    ave_pool = L.Pooling(pool2_3x3_s2, kernel_size=3, stride=1, pad=1, pool=P.Pooling.AVE)  # 192x35x35
    conv_1x1_ave, conv_1x1_ave_bn, conv_1x1_ave_scale, conv_1x1_ave_relu = \
        factorization_conv_bn_scale_relu(ave_pool, num_output=64, kernel_size=1)  # 64x35x35
    concat = L.Concat(conv_1x1, conv_5x5, conv_3x3_2, conv_1x1_ave)  # 320(96+64+96+64)x35x35
    return conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_relu, conv2_3x3_s1, conv2_3x3_s1_bn, \
        conv2_3x3_s1_scale, conv2_3x3_s1_relu, conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu, \
        pool1_3x3_s2, conv4_3x3_reduce, conv4_3x3_reduce_bn, conv4_3x3_reduce_scale, conv4_3x3_reduce_relu, \
        conv4_3x3, conv4_3x3_bn, conv4_3x3_scale, conv4_3x3_relu, pool2_3x3_s2, conv_1x1, conv_1x1_bn, conv_1x1_scale, \
        conv_1x1_relu, conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu, \
        conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu, conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, \
        conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2, conv_3x3_2_bn, \
        conv_3x3_2_scale, conv_3x3_2_relu, ave_pool, conv_1x1_ave, conv_1x1_ave_bn, conv_1x1_ave_scale, conv_1x1_ave_relu, \
        concat
def inception_resnet_v2_a(bottom):
    """Inception-ResNet-v2 block A: three branches concatenated, projected to
    320 channels by a linear 1x1 "up" convolution, then added to the input
    (ReLU after the sum).

    input:320x35x35
    output:320x35x35
    :param bottom: bottom layer
    :return: layers
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=32, kernel_size=3, pad=1)  # 32x35x35
    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=48, kernel_size=3, pad=1)  # 48x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=64, kernel_size=3, pad=1)  # 64x35x35
    concat = L.Concat(conv_1x1, conv_3x3, conv_3x3_3)  # 128(32+32+64)x35x35
    # Linear projection back to the input width so the residual sum matches.
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=320, kernel_size=1)  # 320x35x35
    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 320x35x35
    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_3x3_reduce, conv_3x3_reduce_bn, \
        conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, \
        conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, \
        conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, \
        conv_3x3_3_relu, concat, conv_up, conv_up_bn, conv_up_scale, residual_eltwise, residual_eltwise_relu
def reduction_resnet_v2_a(bottom):
    """Inception-ResNet-v2 reduction A: stride-2 3x3, a 1x1->3x3->3x3 stack,
    and a stride-2 max pool, concatenated.

    input:320x35x35
    output:1088x17x17
    :param bottom: bottom layer
    :return: layers
    """
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=3, stride=2)  # 384x17x17
    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=256, kernel_size=3, stride=1, pad=1)  # 256x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=384, kernel_size=3, stride=2)  # 384x17x17
    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 320x17x17
    concat = L.Concat(conv_3x3, conv_3x3_3, pool)  # 1088(320+384+384)x17x17
    return conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
        conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
        conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, pool, concat
def inception_resnet_v2_b(bottom):
    """Inception-ResNet-v2 block B: a 1x1 branch and a factorized
    1x1 -> 1x7 -> 7x1 branch, concatenated, projected to 1088 channels by a
    linear 1x1 "up" convolution and added to the input (ReLU after the sum).

    input:1088x17x17
    output:1088x17x17
    :param bottom: bottom layer
    :return: layers
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=128, kernel_size=1)  # 128x17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=160, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 160x17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=192, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 192x17x17
    concat = L.Concat(conv_1x1, conv_7x1)  # 384(192+192)x17x17
    # Linear projection back to the input width so the residual sum matches.
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=1088, kernel_size=1)  # 1088x17x17
    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 1088x17x17
    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x7_reduce, conv_1x7_reduce_bn, \
        conv_1x7_reduce_scale, conv_1x7_reduce_relu, conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, \
        conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, concat, conv_up, conv_up_bn, conv_up_scale, \
        residual_eltwise, residual_eltwise_relu
def reduction_resnet_v2_b(bottom):
    """Inception-ResNet-v2 reduction B: three stride-2 convolution branches
    plus a stride-2 max pool, concatenated.

    input:1088x17x17
    output:2080x8x8
    :param bottom: bottom layer
    :return: layers
    """
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=384, kernel_size=3, stride=2)  # 384x8x8
    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=288, kernel_size=3, stride=2)  # 288x8x8
    conv_3x3_3_reduce, conv_3x3_3_reduce_bn, conv_3x3_3_reduce_scale, conv_3x3_3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_3_reduce, num_output=288, kernel_size=3, pad=1)  # 288x17x17
    conv_3x3_4, conv_3x3_4_bn, conv_3x3_4_scale, conv_3x3_4_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_3, num_output=320, kernel_size=3, stride=2)  # 320x8x8
    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 1088x8x8
    concat = L.Concat(conv_3x3, conv_3x3_2, conv_3x3_4, pool)  # 2080(1088+384+288+320)x8x8
    return conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, \
        conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
        conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
        conv_3x3_2_relu, conv_3x3_3_reduce, conv_3x3_3_reduce_bn, conv_3x3_3_reduce_scale, conv_3x3_3_reduce_relu, \
        conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, conv_3x3_4, conv_3x3_4_bn, conv_3x3_4_scale, \
        conv_3x3_4_relu, pool, concat
def inception_resnet_v2_c(bottom):
    """
    Inception-ResNet-C block: a shape-preserving residual unit.
    input:2080x8x8
    output:2080x8x8
    :param bottom: bottom layer
    :return: layers, in a fixed positional order that callers unpack verbatim
    """
    # Branch 1: plain 1x1 conv.
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x8x8
    # Branch 2: 1x1 reduce -> 1x3 -> 3x1 (spatially factorized 3x3).
    conv_1x3_reduce, conv_1x3_reduce_bn, conv_1x3_reduce_scale, conv_1x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x8x8
    conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu = \
        factorization_conv_mxn(conv_1x3_reduce, num_output=224, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 224x8x8
    conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu = \
        factorization_conv_mxn(conv_1x3, num_output=256, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 256x8x8
    concat = L.Concat(conv_1x1, conv_3x1)  # 448(192+256)x8x8
    # 1x1 projection back up to 2080 channels; conv_bn_scale (no ReLU here)
    # because the activation is applied only after the residual addition below.
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=2080, kernel_size=1)  # 2080x8x8
    # Residual connection: elementwise sum with the block input, then ReLU.
    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 2080x8x8

    # NOTE: tuple order is load-bearing; callers unpack it positionally.
    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x3_reduce, conv_1x3_reduce_bn, \
        conv_1x3_reduce_scale, conv_1x3_reduce_relu, conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu, \
        conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu, concat, conv_up, conv_up_bn, conv_up_scale, \
        residual_eltwise, residual_eltwise_relu
# Code templates for the three repeated Inception-ResNet block types (A, B, C).
# Each template is one backslash-continued Python assignment statement.  In
# inception_resnet_v2_proto() the placeholder '(order)' is replaced with the
# block index and the placeholder 'bottom' with the name of the input blob,
# and the substituted statement is then exec'd so that every layer of the
# block is attached to the NetSpec `n` under a unique attribute name.
# HACK: exec-based templating — the left-hand side of each assignment must
# match the return tuple of the corresponding block builder exactly.
string_a = 'n.inception_resnet_v2_a(order)_1x1, n.inception_resnet_v2_a(order)_1x1_bn, n.inception_resnet_v2_a(order)_1x1_scale, \
n.inception_resnet_v2_a(order)_1x1_relu, n.inception_resnet_v2_a(order)_3x3_reduce, n.inception_resnet_v2_a(order)_3x3_reduce_bn, \
n.inception_resnet_v2_a(order)_3x3_reduce_scale, n.inception_resnet_v2_a(order)_3x3_reduce_relu, n.inception_resnet_v2_a(order)_3x3, \
n.inception_resnet_v2_a(order)_3x3_bn, n.inception_resnet_v2_a(order)_3x3_scale, n.inception_resnet_v2_a(order)_3x3_relu, \
n.inception_resnet_v2_a(order)_3x3_2_reduce, n.inception_resnet_v2_a(order)_3x3_2_reduce_bn, n.inception_resnet_v2_a(order)_3x3_2_reduce_scale, \
n.inception_resnet_v2_a(order)_3x3_2_reduce_relu, n.inception_resnet_v2_a(order)_3x3_2, n.inception_resnet_v2_a(order)_3x3_2_bn, \
n.inception_resnet_v2_a(order)_3x3_2_scale, n.inception_resnet_v2_a(order)_3x3_2_relu, n.inception_resnet_v2_a(order)_3x3_3, \
n.inception_resnet_v2_a(order)_3x3_3_bn, n.inception_resnet_v2_a(order)_3x3_3_scale, n.inception_resnet_v2_a(order)_3x3_3_relu, \
n.inception_resnet_v2_a(order)_concat, n.inception_resnet_v2_a(order)_up, n.inception_resnet_v2_a(order)_up_bn, \
n.inception_resnet_v2_a(order)_up_scale, n.inception_resnet_v2_a(order)_residual_eltwise, \
n.inception_resnet_v2_a(order)_residual_eltwise_relu = \
inception_resnet_v2_a(bottom)'
# Template for one Inception-ResNet-B block (17x17 stage).
string_b = 'n.inception_resnet_v2_b(order)_1x1, n.inception_resnet_v2_b(order)_1x1_bn, n.inception_resnet_v2_b(order)_1x1_scale, \
n.inception_resnet_v2_b(order)_1x1_relu, n.inception_resnet_v2_b(order)_1x7_reduce, n.inception_resnet_v2_b(order)_1x7_reduce_bn, \
n.inception_resnet_v2_b(order)_1x7_reduce_scale, n.inception_resnet_v2_b(order)_1x7_reduce_relu, n.inception_resnet_v2_b(order)_1x7, \
n.inception_resnet_v2_b(order)_1x7_bn, n.inception_resnet_v2_b(order)_1x7_scale, n.inception_resnet_v2_b(order)_1x7_relu, \
n.inception_resnet_v2_b(order)_7x1, n.inception_resnet_v2_b(order)_7x1_bn, n.inception_resnet_v2_b(order)_7x1_scale, \
n.inception_resnet_v2_b(order)_7x1_relu, n.inception_resnet_v2_b(order)_concat, n.inception_resnet_v2_b(order)_up, \
n.inception_resnet_v2_b(order)_up_bn, n.inception_resnet_v2_b(order)_up_scale, n.inception_resnet_v2_b(order)_residual_eltwise, \
n.inception_resnet_v2_b(order)_residual_eltwise_relu \
= inception_resnet_v2_b(bottom)'
# Template for one Inception-ResNet-C block (8x8 stage).
string_c = 'n.inception_resnet_v2_c(order)_1x1, n.inception_resnet_v2_c(order)_1x1_bn, n.inception_resnet_v2_c(order)_1x1_scale, \
n.inception_resnet_v2_c(order)_1x1_relu, n.inception_resnet_v2_c(order)_1x3_reduce, n.inception_resnet_v2_c(order)_1x3_reduce_bn, \
n.inception_resnet_v2_c(order)_1x3_reduce_scale, n.inception_resnet_v2_c(order)_1x3_reduce_relu, n.inception_resnet_v2_c(order)_1x3, \
n.inception_resnet_v2_c(order)_1x3_bn, n.inception_resnet_v2_c(order)_1x3_scale, n.inception_resnet_v2_c(order)_1x3_relu, \
n.inception_resnet_v2_c(order)_3x1, n.inception_resnet_v2_c(order)_3x1_bn, n.inception_resnet_v2_c(order)_3x1_scale, \
n.inception_resnet_v2_c(order)_3x1_relu, n.inception_resnet_v2_c(order)_concat, n.inception_resnet_v2_c(order)_up, \
n.inception_resnet_v2_c(order)_up_bn, n.inception_resnet_v2_c(order)_up_scale, n.inception_resnet_v2_c(order)_residual_eltwise, \
n.inception_resnet_v2_c(order)_residual_eltwise_relu = \
inception_resnet_v2_c(bottom)'
class InceptionResNet(object):
def __init__(self, lmdb_train, lmdb_test, num_output):
self.train_data = lmdb_train
self.test_data = lmdb_test
self.classifier_num = num_output
    def inception_resnet_v2_proto(self, batch_size, phase='TRAIN'):
        """Assemble the full Inception-ResNet-v2 network as a Caffe proto.

        Layout: stem -> 10 x block-A -> reduction-A -> 20 x block-B ->
        reduction-B -> 10 x block-C (9 via templates + 1 built inline) ->
        1x1 conv -> global average pool -> dropout -> classifier -> loss
        (plus top-1/top-5 accuracy layers outside of TRAIN).

        :param batch_size: batch size of the LMDB data layer
        :param phase: 'TRAIN' reads the train LMDB with mirroring; any other
            value reads the test LMDB and appends accuracy layers
        :return: the assembled network as a NetParameter protobuf message
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        # Data layer: 299x299 crops with per-channel mean subtraction.
        # NOTE(review): mean_value looks like the usual Caffe BGR ImageNet
        # means — confirm against how the LMDBs were produced.
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=299, mean_value=[104, 117, 123], mirror=mirror))
        # stem: positional unpack of stem_resnet_v2_299x299's return tuple.
        n.conv1_3x3_s2, n.conv1_3x3_s2_bn, n.conv1_3x3_s2_scale, n.conv1_3x3_relu, n.conv2_3x3_s1, n.conv2_3x3_s1_bn, \
        n.conv2_3x3_s1_scale, n.conv2_3x3_relu, n.conv3_3x3_s1, n.conv3_3x3_s1_bn, n.conv3_3x3_s1_scale, n.conv3_3x3_relu, \
        n.pool1_3x3_s2, n.conv4_3x3_reduce, n.conv4_3x3_reduce_bn, n.conv4_3x3_reduce_scale, n.conv4_3x3_reduce_relu, \
        n.conv4_3x3, n.conv4_3x3_bn, n.conv4_3x3_scale, n.conv4_relu_3x3, n.pool2_3x3_s2, n.conv5_1x1, n.conv5_1x1_bn, n.conv5_1x1_scale, \
        n.conv5_1x1_relu, n.conv5_5x5_reduce, n.conv5_5x5_reduce_bn, n.conv5_5x5_reduce_scale, n.conv5_5x5_reduce_relu, \
        n.conv5_5x5, n.conv5_5x5_bn, n.conv5_5x5_scale, n.conv5_5x5_relu, n.conv5_3x3_reduce, n.conv5_3x3_reduce_bn, n.conv5_3x3_reduce_scale, \
        n.conv5_3x3_reduce_relu, n.conv5_3x3, n.conv5_3x3_bn, n.conv5_3x3_scale, n.conv5_3x3_relu, n.conv5_3x3_2, n.conv5_3x3_2_bn, \
        n.conv5_3x3_2_scale, n.conv5_3x3_2_relu, n.ave_pool, n.conv5_1x1_ave, n.conv5_1x1_ave_bn, n.conv5_1x1_ave_scale, n.conv5_1x1_ave_relu, \
        n.stem_concat = stem_resnet_v2_299x299(n.data)  # 320x35x35
        # 10 x inception_resnet_v2_a
        for i in xrange(10):
            if i == 0:
                bottom = 'n.stem_concat'
            else:
                bottom = 'n.inception_resnet_v2_a(order)_residual_eltwise'.replace('(order)', str(i))
            # Substitute the block index and input blob name into the template
            # and exec it, attaching block a(i+1)'s layers onto the NetSpec.
            exec (string_a.replace('(order)', str(i + 1)).replace('bottom', bottom))  # 384x35x35
        # reduction_resnet_v2_a
        n.reduction_a_3x3, n.reduction_a_3x3_bn, n.reduction_a_3x3_scale, n.reduction_a_3x3_relu, \
        n.reduction_a_3x3_2_reduce, n.reduction_a_3x3_2_reduce_bn, n.reduction_a_3x3_2_reduce_scale, \
        n.reduction_a_3x3_2_reduce_relu, n.reduction_a_3x3_2, n.reduction_a_3x3_2_bn, n.reduction_a_3x3_2_scale, \
        n.reduction_a_3x3_2_relu, n.reduction_a_3x3_3, n.reduction_a_3x3_3_bn, n.reduction_a_3x3_3_scale, \
        n.reduction_a_3x3_3_relu, n.reduction_a_pool, n.reduction_a_concat = \
            reduction_resnet_v2_a(n.inception_resnet_v2_a10_residual_eltwise)  # 1088x17x17
        # 20 x inception_resnet_v2_b
        for i in xrange(20):
            if i == 0:
                bottom = 'n.reduction_a_concat'
            else:
                bottom = 'n.inception_resnet_v2_b(order)_residual_eltwise'.replace('(order)', str(i))
            exec (string_b.replace('(order)', str(i + 1)).replace('bottom', bottom))  # 1088x17x17
        # reduction_resnet_v2_b
        n.reduction_b_3x3_reduce, n.reduction_b_3x3_reduce_bn, n.reduction_b_3x3_reduce_scale, \
        n.reduction_b_3x3_reduce_relu, n.reduction_b_3x3, n.reduction_b_3x3_bn, n.reduction_b_3x3_scale, \
        n.reduction_b_3x3_relu, n.reduction_b_3x3_2_reduce, n.reduction_b_3x3_2_reduce_bn, n.reduction_b_3x3_2_reduce_scale, \
        n.reduction_b_3x3_2_reduce_relu, n.reduction_b_3x3_2, n.reduction_b_3x3_2_bn, n.reduction_b_3x3_2_scale, \
        n.reduction_b_3x3_2_relu, n.reduction_b_3x3_3_reduce, n.reduction_b_3x3_3_reduce_bn, n.reduction_b_3x3_3_reduce_scale, \
        n.reduction_b_3x3_3_reduce_relu, n.reduction_b_3x3_3, n.reduction_b_3x3_3_bn, n.reduction_b_3x3_3_scale, \
        n.reduction_b_3x3_3_relu, n.reduction_b_3x3_4, n.reduction_b_3x3_4_bn, n.reduction_b_3x3_4_scale, \
        n.reduction_b_3x3_4_relu, n.reduction_b_pool, n.reduction_b_concat = \
            reduction_resnet_v2_b(n.inception_resnet_v2_b20_residual_eltwise)  # 2080x8x8
        # 9 x inception_resnet_v2_c
        for i in xrange(9):
            if i == 0:
                bottom = 'n.reduction_b_concat'
            else:
                bottom = 'n.inception_resnet_v2_c(order)_residual_eltwise'.replace('(order)', str(i))
            exec (string_c.replace('(order)', str(i + 1)).replace('bottom', bottom))  # 2080x8x8
        # The 10th (final) C block is spelled out inline rather than exec'd;
        # it mirrors inception_resnet_v2_c() layer for layer.
        n.inception_resnet_v2_c10_1x1, n.inception_resnet_v2_c10_1x1_bn, n.inception_resnet_v2_c10_1x1_scale, \
        n.inception_resnet_v2_c10_1x1_relu = \
            factorization_conv_bn_scale_relu(n.inception_resnet_v2_c9_residual_eltwise, num_output=192,
                                             kernel_size=1)  # 192x8x8
        n.inception_resnet_v2_c10_1x3_reduce, n.inception_resnet_v2_c10_1x3_reduce_bn, \
        n.inception_resnet_v2_c10_1x3_reduce_scale, n.inception_resnet_v2_c10_1x3_reduce_relu = \
            factorization_conv_bn_scale_relu(n.inception_resnet_v2_c9_residual_eltwise, num_output=192,
                                             kernel_size=1)  # 192x8x8
        n.inception_resnet_v2_c10_1x3, n.inception_resnet_v2_c10_1x3_bn, n.inception_resnet_v2_c10_1x3_scale, \
        n.inception_resnet_v2_c10_1x3_relu = \
            factorization_conv_mxn(n.inception_resnet_v2_c10_1x3_reduce, num_output=224, kernel_h=1, kernel_w=3,
                                   pad_h=0, pad_w=1)  # 224x8x8
        n.inception_resnet_v2_c10_3x1, n.inception_resnet_v2_c10_3x1_bn, n.inception_resnet_v2_c10_3x1_scale, \
        n.inception_resnet_v2_c10_3x1_relu = \
            factorization_conv_mxn(n.inception_resnet_v2_c10_1x3, num_output=256, kernel_h=3, kernel_w=1, pad_h=1,
                                   pad_w=0)  # 256x8x8
        n.inception_resnet_v2_c10_concat = L.Concat(n.inception_resnet_v2_c10_1x1,
                                                    n.inception_resnet_v2_c10_3x1)  # 448(192+256)x8x8
        n.inception_resnet_v2_c10_up, n.inception_resnet_v2_c10_up_bn, n.inception_resnet_v2_c10_up_scale = \
            factorization_conv_bn_scale(n.inception_resnet_v2_c10_concat, num_output=2080,
                                        kernel_size=1)  # 2080x8x8
        # Residual sum (operation=1 is elementwise SUM in Caffe's EltwiseOp).
        n.inception_resnet_v2_c10_residual_eltwise = \
            L.Eltwise(n.inception_resnet_v2_c9_residual_eltwise, n.inception_resnet_v2_c10_up,
                      eltwise_param=dict(operation=1))  # 2080x8x8
        # Head: 1x1 conv to 1536 channels, then global average pooling.
        n.conv6_1x1, n.conv6_1x1_bn, n.conv6_1x1_scale, n.conv6_1x1_relu = \
            factorization_conv_bn_scale_relu(n.inception_resnet_v2_c10_residual_eltwise, num_output=1536,
                                             kernel_size=1)  # 1536x8x8
        n.pool_8x8_s1 = L.Pooling(n.conv6_1x1,
                                  pool=P.Pooling.AVE,
                                  global_pooling=True)  # 1536x1x1
        n.pool_8x8_s1_drop = L.Dropout(n.pool_8x8_s1, dropout_param=dict(dropout_ratio=0.2))
        # Classifier: fully-connected layer with 2x LR on the bias, no decay.
        n.classifier = L.InnerProduct(n.pool_8x8_s1_drop, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        # Accuracy layers are only attached outside of TRAIN (phase=1 is TEST).
        if phase == 'TRAIN':
            pass
        else:
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 26,493 | 61.781991 | 153 | py |
tt-pytorch | tt-pytorch-master/setup.py |
from setuptools import setup
setup(
name='t3nsor',
version='1.0',
description='TT decomposition on Pytorch',
author='V. Khrulkov, L. Mirvakhabova, A. Grinchuk',
author_email='khrulkov.v@gmail.com',
packages=['t3nsor'], #same as name
install_requires=['numpy', 'sympy', 'scipy'], #external packages as dependencies
)
| 340 | 25.230769 | 83 | py |
tt-pytorch | tt-pytorch-master/sentiment/utils.py | import torch
import torch.nn as nn
import subprocess
import pandas as pd
import pickle
def binary_accuracy(preds, y):
"""
Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
"""
#round predictions to the closest integer
if len(preds.shape) == 1:
rounded_preds = torch.round(torch.sigmoid(preds))
else:
rounded_preds = preds.argmax(1)
correct = (rounded_preds == y).float() #convert into float for division
acc = correct.sum()/len(correct)
return acc
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
total_len = 0
model.train()
if isinstance(criterion, nn.CrossEntropyLoss):
dtype = torch.LongTensor
elif isinstance(criterion, nn.BCEWithLogitsLoss):
dtype = torch.FloatTensor
for i, batch in enumerate(iterator):
optimizer.zero_grad()
device = batch.text.device
labels = batch.label.type(dtype).to(device)
predictions = model(batch.text).squeeze(1)
loss = criterion(predictions, labels)
acc = binary_accuracy(predictions, labels)
loss.backward()
optimizer.step()
B = batch.label.shape[0]
epoch_loss += B * loss.item()
epoch_acc += B * acc.item()
total_len += B
if i > len(iterator):
break
return epoch_loss / total_len, epoch_acc / total_len
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
total_len = 0
model.eval()
if isinstance(criterion, nn.CrossEntropyLoss):
dtype = torch.LongTensor
elif isinstance(criterion, nn.BCEWithLogitsLoss):
dtype = torch.FloatTensor
with torch.no_grad():
for i, batch in enumerate(iterator):
device = batch.text.device
labels = batch.label.type(dtype).to(device)
predictions = model(batch.text).squeeze(1)
loss = criterion(predictions, labels)
acc = binary_accuracy(predictions, labels)
B = batch.label.shape[0]
epoch_loss += B * loss.item()
epoch_acc += B * acc.item()
total_len += B
if i > len(iterator):
break
return epoch_loss / total_len, epoch_acc / total_len | 2,334 | 23.840426 | 83 | py |
tt-pytorch | tt-pytorch-master/sentiment/models.py | import sys
import torch
import numpy as np
import torch.nn as nn
import t3nsor as t3
class LSTM_Classifier(nn.Module):
def __init__(
self,
embedding_dim,
hidden_dim,
output_dim,
n_layers,
bidirectional,
dropout
):
super().__init__()
self.rnn = nn.LSTM(
embedding_dim, hidden_dim,
num_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout)
if bidirectional:
self.fc = nn.Linear(hidden_dim * 2, output_dim)
else:
self.fc = nn.Linear(hidden_dim, output_dim)
self.dropout = nn.Dropout(dropout)
self.bidirectional = bidirectional
def forward(self, x):
embedded = self.dropout(x)
output, (hidden, cell) = self.rnn(embedded)
if self.bidirectional:
hidden = self.dropout(
torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
else:
hidden = self.dropout(hidden)
return self.fc(hidden.squeeze(0))
| 1,074 | 25.219512 | 71 | py |
tt-pytorch | tt-pytorch-master/sentiment/train.py | import argparse
import sys
sys.path.insert(0, '..')
parser = argparse.ArgumentParser()
parser.add_argument(
'--embedding',
default='tt',
choices=['tt', 'tr', 'full'],
type=str)
parser.add_argument('--ranks', type=int, default=8)
parser.add_argument('--d', type=int, default=3)
parser.add_argument('--embed_dim', type=int)
parser.add_argument('--voc_dim', default=25000, type=int)
parser.add_argument('--lr', default=5e-4)
parser.add_argument('--gpu', default='', type=str)
parser.add_argument('--hidden_dim', default=128, type=int)
parser.add_argument('--n_epochs', default=10, type=int)
parser.add_argument('--fout', default="logdir/", type=str)
parser.add_argument('--dropout', default=0.5, type=float)
parser.add_argument(
'--dataset',
default='imdb',
choices=['imdb', 'sst3', 'sst5'],
type=str)
args = parser.parse_args()
if args.embedding == 'tt':
tt = "tt"
elif args.embedding == 'tt':
tt = 'tr'
else:
tt = "full"
model_name = f"{args.dataset}-dim_{args.embed_dim}-d_{args.d}-ranks_{args.ranks}-{tt}"
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']=args.gpu
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import t3nsor as t3
from torchtext import data
from torchtext import datasets
import torch.optim as optim
from models import LSTM_Classifier
from utils import binary_accuracy, train, evaluate
import pickle
import random
random.seed(42)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
TEXT = data.Field(tokenize='spacy', fix_length=1000)
LABEL = data.LabelField(dtype=torch.float)
print('Building dataset...')
if args.dataset == 'imdb':
OUTPUT_DIM = 1
train_data, test_ = datasets.IMDB.splits(TEXT, LABEL)
test_list = list(test_)
random.shuffle(test_list)
test_data_ = test_list[:12500]
val_data_ = test_list[12500:]
valid_data = data.dataset.Dataset(
val_data_, fields=[('text', TEXT), ('label', LABEL)])
test_data = data.dataset.Dataset(
test_data_, fields=[('text', TEXT), ('label', LABEL)])
elif args.dataset[:3] == 'sst':
OUTPUT_DIM = int(args.dataset[3])
fine_grained = (OUTPUT_DIM == 5)
train_data, valid_data, test_data = datasets.SST.splits(
TEXT, LABEL, fine_grained=fine_grained)
else:
raise NotImplementedError
print('Done')
def sort_key(ex):
return len(ex.text)
TEXT.build_vocab(train_data, max_size=args.voc_dim - 2)
LABEL.build_vocab(train_data)
BATCH_SIZE = 256
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
device=device)
valid_iterator.sort_key = sort_key
test_iterator.sort_key = sort_key
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = args.embed_dim
HIDDEN_DIM = args.hidden_dim
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = args.dropout
actual_vocab_size = len(TEXT.vocab.stoi)
lstm_model = LSTM_Classifier(embedding_dim=EMBEDDING_DIM,
hidden_dim=HIDDEN_DIM,
output_dim=OUTPUT_DIM,
n_layers=N_LAYERS,
bidirectional=BIDIRECTIONAL,
dropout=DROPOUT)
if args.embedding == 'tt':
embed_model = t3.TTEmbedding(
voc_size=INPUT_DIM,
emb_size=EMBEDDING_DIM,
auto_shapes=True,
auto_shape_mode='mixed',
d=args.d,
tt_rank=args.ranks,
padding_idx=1
)
compression_rate = INPUT_DIM * EMBEDDING_DIM / embed_model.tt_matrix.dof
elif args.embedding == 'tr':
embed_model = t3.TREmbedding(
voc_size=INPUT_DIM,
emb_size=EMBEDDING_DIM,
auto_shapes=True,
auto_shape_mode='mixed',
d=args.d,
tr_rank=args.ranks,
padding_idx=1
)
compression_rate = INPUT_DIM * EMBEDDING_DIM / embed_model.tr_matrix.dof
else:
embed_model = nn.Embedding(
num_embeddings=INPUT_DIM,
embedding_dim=EMBEDDING_DIM
)
compression_rate = 1.0
def cross_entropy_loss(logits, target):
labels = target.type(torch.LongTensor).to(logits.device)
return nn.CrossEntropyLoss()(logits, labels)
model = nn.Sequential(embed_model, lstm_model)
n_all_param = sum([p.nelement() for p in model.parameters()])
if args.dataset == 'imdb':
criterion = nn.BCEWithLogitsLoss()
#criterion = criterion.to(device)
elif args.dataset[:3] == 'sst':
criterion = nn.CrossEntropyLoss()
#criterion = criterion.to(device)
else:
raise NotImplementedError
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
print(model)
N_EPOCHS = args.n_epochs
log = {
'compression_rate':compression_rate,
'train_loss':[], 'test_loss':[], 'valid_loss':[],
'train_acc':[], 'test_acc':[], 'valid_acc':[]}
best_result = {
"epoch": 0, "train_acc": 0, "valid_acc": 0, "train_acc": 0}
for epoch in range(N_EPOCHS):
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
test_loss, test_acc = evaluate(model, test_iterator, criterion)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
log['train_loss'].append(train_loss)
log['test_loss'].append(test_loss)
log['train_acc'].append(train_acc)
log['test_acc'].append(test_acc)
log['valid_acc'].append(valid_acc)
log['valid_loss'].append(valid_loss)
if best_result["valid_acc"] < valid_acc:
best_result["epoch"] = epoch
best_result["train_acc"] = train_acc
best_result["valid_acc"] = valid_acc
best_result["test_acc"] = test_acc
if args.fout is not None:
with open(args.fout+f"{model_name}-best.pkl", 'wb') as f:
pickle.dump(best_result, f)
print(f'| Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}% | Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% |')
print ("TEST ACCURACY:", np.round(best_result["test_acc"] * 100, 2))
if epoch == 0 or epoch == N_EPOCHS-1:
+ print('Compression rate:', compression_rate)
+ print('#params = {}'.format(n_all_param))
| 6,304 | 29.606796 | 226 | py |
tt-pytorch | tt-pytorch-master/t3nsor/initializers.py | import numpy as np
import torch
from t3nsor.tensor_train import TensorTrain
from t3nsor.tensor_train import TensorTrainBatch
def _validate_input_parameters(is_tensor, shape, **params):
"""Internal function for validating input parameters
Args:
is_tensor: bool, determines whether we attempt to construct a TT-tensor or
a TT-matrix (needed for the correct shape checks).
shape: array, the desired shape of the generated TT object
params: optional, possible values:
batch_size: int, for constructing batches
tt_rank: array or int, desired TT-ranks
"""
if is_tensor:
if len(shape.shape) != 1:
raise ValueError('shape should be 1d array, got %a' % shape)
if np.any(shape < 1):
raise ValueError('all elements in `shape` should be positive, got %a' %
shape)
if not all(isinstance(sh, np.integer) for sh in shape):
raise ValueError('all elements in `shape` should be integers, got %a' %
shape)
else:
if len(shape.shape) != 2:
raise ValueError('shape should be 2d array, got %a' % shape)
if shape[0].size != shape[1].size:
raise ValueError('shape[0] should have the same length as shape[1], but'
'got %d and %d' % (shape[0].size, shape[1].size))
if np.any(shape.flatten() < 1):
raise ValueError('all elements in `shape` should be positive, got %a' %
shape)
if not all(isinstance(sh, np.integer) for sh in shape.flatten()):
raise ValueError('all elements in `shape` should be integers, got %a' %
shape)
if 'batch_size' in params:
batch_size = params['batch_size']
if not isinstance(batch_size, (int, np.integer)):
raise ValueError('`batch_size` should be integer, got %f' % batch_size)
if batch_size < 1:
raise ValueError('Batch size should be positive, got %d' % batch_size)
if 'tt_rank' in params:
tt_rank = params['tt_rank']
if tt_rank.size == 1:
if not isinstance(tt_rank[()], np.integer):
raise ValueError('`tt_rank` should be integer, got %f' % tt_rank[()])
if tt_rank.size > 1:
if not all(isinstance(tt_r, np.integer) for tt_r in tt_rank):
raise ValueError('all elements in `tt_rank` should be integers, got'
' %a' % tt_rank)
if np.any(tt_rank < 1):
raise ValueError('`tt_rank` should be positive, got %a' % tt_rank)
if is_tensor:
if tt_rank.size != 1 and tt_rank.size != (shape.size + 1):
raise ValueError('`tt_rank` array has inappropriate size, expected'
'1 or %d, got %d' % (shape.size + 1, tt_rank.size))
else:
if tt_rank.size != 1 and tt_rank.size != (shape[0].size + 1):
raise ValueError('`tt_rank` array has inappropriate size, expected'
'1 or %d, got %d' % (shape[0].size + 1, tt_rank.size))
def tensor_ones(shape, dtype=torch.float32):
"""Generate TT-tensor of the given shape with all entries equal to 1.
Args:
shape: array representing the shape of the future tensor
dtype: [torch.float32] dtype of the resulting tensor.
name: string, name of the Op.
Returns:
TensorTrain object containing a TT-tensor
"""
shape = np.array(shape)
_validate_input_parameters(is_tensor=True, shape=shape)
num_dims = shape.size
tt_cores = num_dims * [None]
for i in range(num_dims):
curr_core_shape = (1, shape[i], 1)
tt_cores[i] = torch.ones(curr_core_shape, dtype=dtype)
return TensorTrain(tt_cores)
def tensor_zeros(shape, dtype=torch.float32):
"""Generate TT-tensor of the given shape with all entries equal to 0.
Args:
shape: array representing the shape of the future tensor
dtype: [torch.float32] dtype of the resulting tensor.
name: string, name of the Op.
Returns:
TensorTrain object containing a TT-tensor
"""
shape = np.array(shape)
_validate_input_parameters(is_tensor=True, shape=shape)
num_dims = shape.size
tt_cores = num_dims * [None]
for i in range(num_dims):
curr_core_shape = (1, shape[i], 1)
tt_cores[i] = torch.zeros(curr_core_shape, dtype=dtype)
return TensorTrain(tt_cores)
def matrix_zeros(shape, rank=2, dtype=torch.float32):
"""Generate TT-matrix of the given shape with all entries equal to 0.
Args:
shape: array representing the shape of the future tensor
dtype: [torch.float32] dtype of the resulting tensor.
name: string, name of the Op.
Returns:
TensorTrain object containing a TT-matrix
"""
shape = np.array(shape)
_validate_input_parameters(is_tensor=False, shape=shape)
num_dims = shape[0].size
tt_cores = num_dims * [None]
curr_core_shape = (1, shape[0][0], shape[1][0], rank)
tt_cores[0] = torch.zeros(curr_core_shape, dtype=dtype)
for i in range(1, num_dims - 1):
curr_core_shape = (rank, shape[0][i], shape[1][i], rank)
tt_cores[i] = torch.zeros(curr_core_shape, dtype=dtype)
curr_core_shape = (rank, shape[0][num_dims - 1], shape[1][num_dims - 1], 1)
tt_cores[num_dims - 1] = torch.zeros(curr_core_shape, dtype=dtype)
return TensorTrain(tt_cores)
def eye(shape, dtype=torch.float32):
"""Creates an identity TT-matrix.
Args:
shape: array which defines the shape of the matrix row and column
indices.
dtype: [torch.float32] dtype of the resulting matrix.
name: string, name of the Op.
Returns:
TensorTrain containing an identity TT-matrix of size
np.prod(shape) x np.prod(shape)
"""
shape = np.array(shape)
# In this special case shape is in the same format as in the TT-tensor case
_validate_input_parameters(is_tensor=True, shape=shape)
num_dims = shape.size
tt_cores = num_dims * [None]
for i in range(num_dims):
curr_core_shape = (1, int(shape[i]), int(shape[i]), 1)
tt_cores[i] = torch.eye(shape[i], dtype=dtype).view(*curr_core_shape)
return TensorTrain(tt_cores)
def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
dtype=torch.float32):
"""Generate a TT-matrix of given shape with N(mean, stddev^2) cores.
Args:
shape: 2d array, shape[0] is the shape of the matrix row-index,
shape[1] is the shape of the column index.
shape[0] and shape[1] should have the same number of elements (d)
Also supports omitting one of the dimensions for vectors, e.g.
matrix_with_random_cores([[2, 2, 2], None])
and
matrix_with_random_cores([None, [2, 2, 2]])
will create an 8-element column and row vectors correspondingly.
tt_rank: a number or a (d+1)-element array with ranks.
mean: a number, the mean of the normal distribution used for
initializing TT-cores.
stddev: a number, the standard deviation of the normal distribution used
for initializing TT-cores.
dtype: [tf.float32] dtype of the resulting matrix.
name: string, name of the Op.
Returns:
TensorTrain containing a TT-matrix of size
np.prod(shape[0]) x np.prod(shape[1])
"""
# TODO: good distribution to init training.
# In case the shape is immutable.
shape = list(shape)
# In case shape represents a vector, e.g. [None, [2, 2, 2]]
if shape[0] is None:
shape[0] = np.ones(len(shape[1]), dtype=int)
# In case shape represents a vector, e.g. [[2, 2, 2], None]
if shape[1] is None:
shape[1] = np.ones(len(shape[0]), dtype=int)
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank)
num_dims = shape[0].size
if tt_rank.size == 1:
tt_rank = tt_rank * np.ones(num_dims - 1)
tt_rank = np.concatenate([[1], tt_rank, [1]])
tt_rank = tt_rank.astype(int)
tt_cores = [None] * num_dims
for i in range(num_dims):
curr_core_shape = (tt_rank[i], shape[0][i], shape[1][i],
tt_rank[i + 1])
tt_cores[i] = torch.randn(curr_core_shape, dtype=dtype) * stddev + mean
return TensorTrain(tt_cores)
def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
dtype=torch.float32):
"""Generate a random TT-matrix of the given shape with given mean and stddev.
Entries of the generated matrix (in the full format) will be iid and satisfy
E[x_{i1i2..id}] = mean, Var[x_{i1i2..id}] = stddev^2, but the distribution is
in fact not Gaussian.
In the current implementation only mean 0 is supported. To get
a random_matrix with specified mean but tt_rank greater by 1 you can call
x = ttt.random_matrix(shape, tt_rank, stddev=stddev)
x = mean * t3f.ones_like(x) + x
Args:
shape: 2d array, shape[0] is the shape of the matrix row-index,
shape[1] is the shape of the column index.
shape[0] and shape[1] should have the same number of elements (d)
Also supports omitting one of the dimensions for vectors, e.g.
random_matrix([[2, 2, 2], None])
and
random_matrix([None, [2, 2, 2]])
will create an 8-element column and row vectors correspondingly.
tt_rank: a number or a (d+1)-element array with ranks.
mean: a number, the desired mean for the distribution of entries.
stddev: a number, the desired standard deviation for the distribution of
entries.
dtype: [tf.float32] dtype of the resulting matrix.
name: string, name of the Op.
Returns:
TensorTrain containing a TT-matrix of size
np.prod(shape[0]) x np.prod(shape[1])
"""
# TODO: good distribution to init training.
# In case the shape is immutable.
shape = list(shape)
# In case shape represents a vector, e.g. [None, [2, 2, 2]]
if shape[0] is None:
shape[0] = np.ones(len(shape[1]), dtype=int)
# In case shape represents a vector, e.g. [[2, 2, 2], None]
if shape[1] is None:
shape[1] = np.ones(len(shape[0]), dtype=int)
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank)
num_dims = shape[0].size
if tt_rank.size == 1:
tt_rank = tt_rank * np.ones(num_dims - 1)
tt_rank = np.concatenate([[1], tt_rank, [1]])
tt_rank = tt_rank.astype(int)
var = np.prod(tt_rank)
# Empirically entries of a TT tensor with cores initialized from N(0, 1)
# will have variances np.prod(tt_rank) and mean 0.
# We scale each TT-core to obtain the desired stddev
cr_exponent = -1.0 / (2 * num_dims)
var = np.prod(tt_rank ** cr_exponent)
core_stddev = stddev ** (1.0 / num_dims) * var
tt = matrix_with_random_cores(shape, tt_rank=tt_rank, stddev=core_stddev, dtype=dtype)
if np.abs(mean) < 1e-8:
return tt
else:
raise NotImplementedError('non-zero mean is not supported yet')
def glorot_initializer(shape, tt_rank=2, dtype=torch.float32):
shape = list(shape)
# In case shape represents a vector, e.g. [None, [2, 2, 2]]
if shape[0] is None:
shape[0] = np.ones(len(shape[1]), dtype=int)
# In case shape represents a vector, e.g. [[2, 2, 2], None]
if shape[1] is None:
shape[1] = np.ones(len(shape[0]), dtype=int)
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank)
n_in = np.prod(shape[0])
n_out = np.prod(shape[1])
lamb = 2.0 / (n_in + n_out)
return random_matrix(shape, tt_rank=tt_rank, stddev=np.sqrt(lamb),dtype=dtype)
def matrix_batch_with_random_cores(shape, batch_size=1, tt_rank=2, mean=0., stddev=1.,
dtype=torch.float32):
"""Generate a TT-matrix of given shape with N(mean, stddev^2) cores.
Args:
shape: 2d array, shape[0] is the shape of the matrix row-index,
shape[1] is the shape of the column index.
shape[0] and shape[1] should have the same number of elements (d)
Also supports omitting one of the dimensions for vectors, e.g.
matrix_with_random_cores([[2, 2, 2], None])
and
matrix_with_random_cores([None, [2, 2, 2]])
will create an 8-element column and row vectors correspondingly.
tt_rank: a number or a (d+1)-element array with ranks.
mean: a number, the mean of the normal distribution used for
initializing TT-cores.
stddev: a number, the standard deviation of the normal distribution used
for initializing TT-cores.
dtype: [tf.float32] dtype of the resulting matrix.
name: string, name of the Op.
Returns:
TensorTrain containing a TT-matrix of size
np.prod(shape[0]) x np.prod(shape[1])
"""
# TODO: good distribution to init training.
# In case the shape is immutable.
shape = list(shape)
# In case shape represents a vector, e.g. [None, [2, 2, 2]]
if shape[0] is None:
shape[0] = np.ones(len(shape[1]), dtype=int)
# In case shape represents a vector, e.g. [[2, 2, 2], None]
if shape[1] is None:
shape[1] = np.ones(len(shape[0]), dtype=int)
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank, batch_size=batch_size)
num_dims = shape[0].size
if tt_rank.size == 1:
tt_rank = tt_rank * np.ones(num_dims - 1)
tt_rank = np.concatenate([[1], tt_rank, [1]])
tt_rank = tt_rank.astype(int)
tt_cores = [None] * num_dims
for i in range(num_dims):
curr_core_shape = (batch_size, tt_rank[i], shape[0][i], shape[1][i],
tt_rank[i + 1])
tt_cores[i] = torch.randn(curr_core_shape, dtype=dtype) * stddev + mean
return TensorTrainBatch(tt_cores)
def random_matrix_batch(shape, batch_size=1, tt_rank=2, mean=0., stddev=1.,
                        dtype=torch.float32):
    """Sample a batch of random TT-matrices with given entry mean and stddev.

    Entries of each full matrix are iid with E[x] = mean, Var[x] = stddev^2,
    though the distribution is not Gaussian. Only mean == 0 is supported.

    Args:
      shape: 2d array, [row_modes, col_modes]; either side may be None to
        request a row/column vector.
      batch_size: number of matrices in the batch.
      tt_rank: a number or a (d+1)-element array with ranks.
      mean, stddev: desired moments of the full-matrix entries.
      dtype: dtype of the resulting cores.

    Returns:
      TensorTrainBatch of size batch_size x prod(shape[0]) x prod(shape[1]).
    """
    row_modes, col_modes = list(shape)
    if row_modes is None:
        row_modes = np.ones(len(col_modes), dtype=int)
    if col_modes is None:
        col_modes = np.ones(len(row_modes), dtype=int)
    shape = np.array([row_modes, col_modes])
    tt_rank = np.array(tt_rank)
    _validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank,
                               batch_size=batch_size)
    d = shape[0].size
    if tt_rank.size == 1:
        tt_rank = np.concatenate([[1], tt_rank * np.ones(d - 1), [1]])
    tt_rank = tt_rank.astype(int)
    # Cores drawn from N(0, 1) empirically give full entries with variance
    # prod(ranks) and mean 0; rescale every core so the assembled matrix has
    # the requested stddev.
    rank_scale = np.prod(tt_rank ** (-1.0 / (2 * d)))
    core_stddev = stddev ** (1.0 / d) * rank_scale
    tt = matrix_batch_with_random_cores(shape, batch_size=batch_size, tt_rank=tt_rank,
                                        stddev=core_stddev, dtype=dtype)
    if np.abs(mean) < 1e-8:
        return tt
    raise NotImplementedError('non-zero mean is not supported yet')
tt-pytorch | tt-pytorch-master/t3nsor/initializers_tr.py | import numpy as np
import torch
from t3nsor.tensor_ring import TensorRing
# from t3nsor.tensor_ring import TensorRingBatch
def _validate_input_parameters_tr(is_tensor, shape, **params):
"""Internal function for validating input parameters
Args:
is_tensor: bool, determines whether we attempt to construct a TT-tensor or
a TT-matrix (needed for the correct shape checks).
shape: array, the desired shape of the generated TT object
params: optional, possible values:
batch_size: int, for constructing batches
tr_rank: array or int, desired TT-ranks
"""
if is_tensor:
if len(shape.shape) != 1:
raise ValueError('shape should be 1d array, got %a' % shape)
if np.any(shape < 1):
raise ValueError('all elements in `shape` should be positive, got %a' %
shape)
if not all(isinstance(sh, np.integer) for sh in shape):
raise ValueError('all elements in `shape` should be integers, got %a' %
shape)
else:
if len(shape.shape) != 2:
raise ValueError('shape should be 2d array, got %a' % shape)
if shape[0].size != shape[1].size:
raise ValueError('shape[0] should have the same length as shape[1], but'
'got %d and %d' % (shape[0].size, shape[1].size))
if np.any(shape.flatten() < 1):
raise ValueError('all elements in `shape` should be positive, got %a' %
shape)
if not all(isinstance(sh, np.integer) for sh in shape.flatten()):
raise ValueError('all elements in `shape` should be integers, got %a' %
shape)
if 'batch_size' in params:
batch_size = params['batch_size']
if not isinstance(batch_size, (int, np.integer)):
raise ValueError('`batch_size` should be integer, got %f' % batch_size)
if batch_size < 1:
raise ValueError('Batch size should be positive, got %d' % batch_size)
if 'tr_rank' in params:
tr_rank = params['tr_rank']
if tr_rank.size == 1:
if not isinstance(tr_rank[()], np.integer):
raise ValueError('`tr_rank` should be integer, got %f' % tr_rank[()])
if tr_rank.size > 1:
if not all(isinstance(tt_r, np.integer) for tt_r in tr_rank):
raise ValueError('all elements in `tr_rank` should be integers, got'
' %a' % tr_rank)
if np.any(tr_rank < 1):
raise ValueError('`tr_rank` should be positive, got %a' % tr_rank)
if is_tensor:
if tr_rank.size != 1 and tr_rank.size != (shape.size + 1):
raise ValueError('`tr_rank` array has inappropriate size, expected'
'1 or %d, got %d' % (shape.size + 1, tr_rank.size))
else:
if tr_rank.size != 1 and tr_rank.size != (shape[0].size + 1):
raise ValueError('`tr_rank` array has inappropriate size, expected'
'1 or %d, got %d' % (shape[0].size + 1, tr_rank.size))
def tensor_ones_tr(shape, dtype=torch.float32):
    """Generate a rank-1 TR-tensor of `shape` whose full form is all ones.

    Args:
      shape: array representing the shape of the future tensor.
      dtype: [torch.float32] dtype of the resulting tensor.

    Returns:
      TensorRing object containing the all-ones tensor.
    """
    shape = np.array(shape)
    _validate_input_parameters_tr(is_tensor=True, shape=shape)
    # Each core is a (1, mode, 1) slab of ones; their contraction is all ones.
    cores = [torch.ones((1, int(mode), 1), dtype=dtype) for mode in shape]
    return TensorRing(cores)
def tensor_zeros_tr(shape, dtype=torch.float32):
    """Generate a rank-1 TR-tensor of `shape` whose full form is all zeros.

    Args:
      shape: array representing the shape of the future tensor.
      dtype: [torch.float32] dtype of the resulting tensor.

    Returns:
      TensorRing object containing the all-zero tensor.
    """
    shape = np.array(shape)
    _validate_input_parameters_tr(is_tensor=True, shape=shape)
    cores = [torch.zeros((1, int(mode), 1), dtype=dtype) for mode in shape]
    return TensorRing(cores)
def matrix_zeros_tr(shape, rank=2, dtype=torch.float32):
    """Generate a TR-matrix of the given shape with all entries equal to 0.

    Args:
      shape: 2d array; shape[0] are the row modes, shape[1] the column modes.
      rank: int, rank of every internal bond between adjacent cores
        (boundary ranks are always 1).
      dtype: [torch.float32] dtype of the resulting cores.

    Returns:
      TensorRing object containing the all-zero TT-matrix.
    """
    shape = np.array(shape)
    _validate_input_parameters_tr(is_tensor=False, shape=shape)
    num_dims = shape[0].size
    # BUG FIX: the old code assigned the first core and then, for num_dims == 1,
    # overwrote it with a core of shape (rank, m, n, 1), leaving an invalid
    # leading bond. Building the full rank list first handles every num_dims.
    ranks = [1] + [rank] * (num_dims - 1) + [1]
    tr_cores = [
        torch.zeros((ranks[i], shape[0][i], shape[1][i], ranks[i + 1]), dtype=dtype)
        for i in range(num_dims)
    ]
    return TensorRing(tr_cores)
def eye_tr(shape, dtype=torch.float32):
    """Create an identity TR-matrix.

    Args:
      shape: array defining the (shared) row and column modes of the matrix.
      dtype: [torch.float32] dtype of the resulting matrix.

    Returns:
      TensorRing containing an identity TT-matrix of size
      np.prod(shape) x np.prod(shape).
    """
    shape = np.array(shape)
    # In this special case shape is in the same format as in the TR-tensor case.
    _validate_input_parameters_tr(is_tensor=True, shape=shape)
    tr_cores = []
    for mode in shape:
        m = int(mode)
        # BUG FIX: the old code called torch.eye_tr, which does not exist
        # (AttributeError on every call); torch.eye builds the identity core.
        tr_cores.append(torch.eye(m, dtype=dtype).view(1, m, m, 1))
    return TensorRing(tr_cores)
def matrix_with_random_cores_tr(shape, tr_rank=2, mean=0., stddev=1.,
                                dtype=torch.float32):
    """Build a TR-matrix whose cores are drawn from N(mean, stddev^2).

    Args:
      shape: 2d array, [row_modes, col_modes]; either side may be None to
        request a row/column vector (that side is filled with ones).
      tr_rank: a number or a (d+1)-element array with ranks.
      mean, stddev: parameters of the normal distribution for the cores.
      dtype: dtype of the resulting cores.

    Returns:
      TensorRing representing a matrix of size
      np.prod(shape[0]) x np.prod(shape[1]).
    """
    row_modes, col_modes = list(shape)
    # A missing side means "vector": use all-ones modes on that side.
    if row_modes is None:
        row_modes = np.ones(len(col_modes), dtype=int)
    if col_modes is None:
        col_modes = np.ones(len(row_modes), dtype=int)
    shape = np.array([row_modes, col_modes])
    tr_rank = np.array(tr_rank)
    _validate_input_parameters_tr(is_tensor=False, shape=shape, tr_rank=tr_rank)
    d = shape[0].size
    if tr_rank.size == 1:
        tr_rank = np.concatenate([[1], tr_rank * np.ones(d - 1), [1]])
    tr_rank = tr_rank.astype(int)
    cores = [
        mean + stddev * torch.randn((tr_rank[k], shape[0][k], shape[1][k],
                                     tr_rank[k + 1]), dtype=dtype)
        for k in range(d)
    ]
    return TensorRing(cores)
def random_matrix_tr(shape, tr_rank=2, mean=0., stddev=1.,
                     dtype=torch.float32):
    """Sample a random TR-matrix with given entry mean and stddev.

    Entries of the full matrix are iid with E[x] = mean, Var[x] = stddev^2,
    though the distribution is not Gaussian. Only mean == 0 is supported.

    Args:
      shape: 2d array, [row_modes, col_modes]; either side may be None to
        request a row/column vector.
      tr_rank: a number or a (d+1)-element array with ranks.
      mean, stddev: desired moments of the full-matrix entries.
      dtype: dtype of the resulting cores.

    Returns:
      TensorRing of size np.prod(shape[0]) x np.prod(shape[1]).
    """
    row_modes, col_modes = list(shape)
    if row_modes is None:
        row_modes = np.ones(len(col_modes), dtype=int)
    if col_modes is None:
        col_modes = np.ones(len(row_modes), dtype=int)
    shape = np.array([row_modes, col_modes])
    tr_rank = np.array(tr_rank)
    _validate_input_parameters_tr(is_tensor=False, shape=shape, tr_rank=tr_rank)
    d = shape[0].size
    if tr_rank.size == 1:
        # Tensor-ring convention: the same rank on every bond, including the
        # closing one, hence d + 1 entries (no boundary-1 padding).
        tr_rank = tr_rank * np.ones(d + 1)
    tr_rank = tr_rank.astype(int)
    # Cores drawn from N(0, 1) give full entries with variance prod(ranks);
    # rescale every core so the assembled matrix has the requested stddev.
    rank_scale = np.prod(tr_rank ** (-1.0 / (2 * d)))
    core_stddev = stddev ** (1.0 / d) * rank_scale
    tt = matrix_with_random_cores_tr(shape, tr_rank=tr_rank, stddev=core_stddev, dtype=dtype)
    if np.abs(mean) < 1e-8:
        return tt
    raise NotImplementedError('non-zero mean is not supported yet')
def glorot_initializer_tr(shape, tr_rank=2, dtype=torch.float32):
    """Glorot-style random TR-matrix: Var(entry) = 2 / (fan_in + fan_out)."""
    row_modes, col_modes = list(shape)
    if row_modes is None:
        row_modes = np.ones(len(col_modes), dtype=int)
    if col_modes is None:
        col_modes = np.ones(len(row_modes), dtype=int)
    shape = np.array([row_modes, col_modes])
    tr_rank = np.array(tr_rank)
    _validate_input_parameters_tr(is_tensor=False, shape=shape, tr_rank=tr_rank)
    fan_in = np.prod(shape[0])
    fan_out = np.prod(shape[1])
    variance = 2.0 / (fan_in + fan_out)
    return random_matrix_tr(shape, tr_rank=tr_rank, stddev=np.sqrt(variance), dtype=dtype)
def matrix_batch_with_random_cores_tr(shape, batch_size=1, tr_rank=2, mean=0., stddev=1.,
                                      dtype=torch.float32):
    """Generate a batch of TR-matrices of given shape with N(mean, stddev^2) cores.

    Args:
      shape: 2d array, [row_modes, col_modes]; either side may be None to
        request a row/column vector (that side is filled with ones).
      batch_size: number of matrices in the batch.
      tr_rank: a number or a (d+1)-element array with ranks.
      mean, stddev: parameters of the normal distribution for the cores.
      dtype: dtype of the resulting cores.

    Returns:
      A batch of TT-matrices of size np.prod(shape[0]) x np.prod(shape[1]).

    NOTE(review): this returns ``TensorRingBatch``, but the corresponding
    import at the top of this module is commented out, so calling this
    function raises NameError — confirm whether the class exists elsewhere.
    """
    # In case the shape is immutable.
    shape = list(shape)
    # In case shape represents a vector, e.g. [None, [2, 2, 2]]
    if shape[0] is None:
        shape[0] = np.ones(len(shape[1]), dtype=int)
    # In case shape represents a vector, e.g. [[2, 2, 2], None]
    if shape[1] is None:
        shape[1] = np.ones(len(shape[0]), dtype=int)
    shape = np.array(shape)
    tr_rank = np.array(tr_rank)
    _validate_input_parameters_tr(is_tensor=False, shape=shape, tr_rank=tr_rank, batch_size=batch_size)
    num_dims = shape[0].size
    if tr_rank.size == 1:
        # Scalar rank: expand to [1, r, ..., r, 1] with num_dims + 1 entries.
        tr_rank = tr_rank * np.ones(num_dims - 1)
        tr_rank = np.concatenate([[1], tr_rank, [1]])
    tr_rank = tr_rank.astype(int)
    tr_cores = [None] * num_dims
    for i in range(num_dims):
        # Batch core layout: (batch, left_rank, row_mode, col_mode, right_rank).
        curr_core_shape = (batch_size, tr_rank[i], shape[0][i], shape[1][i],
                           tr_rank[i + 1])
        tr_cores[i] = torch.randn(curr_core_shape, dtype=dtype) * stddev + mean
    return TensorRingBatch(tr_cores)
def random_matrix_tr_batch_tr(shape, batch_size=1, tr_rank=2, mean=0., stddev=1.,
                              dtype=torch.float32):
    """Sample a batch of random TR-matrices with given entry mean and stddev.

    Entries of each full matrix are iid with E[x] = mean, Var[x] = stddev^2,
    though the distribution is not Gaussian. Only mean == 0 is supported.

    Args:
      shape: 2d array, [row_modes, col_modes]; either side may be None to
        request a row/column vector.
      batch_size: number of matrices in the batch.
      tr_rank: a number or a (d+1)-element array with ranks.
      mean, stddev: desired moments of the full-matrix entries.
      dtype: dtype of the resulting cores.

    Returns:
      A batch of TT-matrices of size np.prod(shape[0]) x np.prod(shape[1]).
    """
    row_modes, col_modes = list(shape)
    if row_modes is None:
        row_modes = np.ones(len(col_modes), dtype=int)
    if col_modes is None:
        col_modes = np.ones(len(row_modes), dtype=int)
    shape = np.array([row_modes, col_modes])
    tr_rank = np.array(tr_rank)
    _validate_input_parameters_tr(is_tensor=False, shape=shape, tr_rank=tr_rank, batch_size=batch_size)
    d = shape[0].size
    if tr_rank.size == 1:
        tr_rank = np.concatenate([[1], tr_rank * np.ones(d - 1), [1]])
    tr_rank = tr_rank.astype(int)
    # Cores drawn from N(0, 1) give full entries with variance prod(ranks);
    # rescale every core so each assembled matrix has the requested stddev.
    rank_scale = np.prod(tr_rank ** (-1.0 / (2 * d)))
    core_stddev = stddev ** (1.0 / d) * rank_scale
    tt = matrix_batch_with_random_cores_tr(shape, batch_size=batch_size, tr_rank=tr_rank,
                                           stddev=core_stddev, dtype=dtype)
    if np.abs(mean) < 1e-8:
        return tt
    raise NotImplementedError('non-zero mean is not supported yet')
tt-pytorch | tt-pytorch-master/t3nsor/utils.py | from scipy.stats import entropy
import numpy as np
from sympy.utilities.iterables import multiset_partitions
from sympy.ntheory import factorint
from itertools import cycle, islice
import torch
MODES = ['ascending', 'descending', 'mixed']
CRITERIONS = ['entropy', 'var']
def _to_list(p):
res = []
for k, v in p.items():
res += [k, ] * v
return res
def _roundup(n, k):
return int(np.ceil(n / 10**k)) * 10**k
def _roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
pending = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
def _get_all_factors(n, d=3, mode='ascending'):
    """Enumerate all distinct ways of writing n as a product of d factors.

    `mode` controls the ordering of each factor tuple: sorted ascending,
    sorted descending, or 'mixed' (small/large halves interleaved).
    """
    primes = _factorint2(n)
    # Pad with 1s so a partition into d groups always exists.
    if len(primes) < d:
        primes = primes + [1] * (d - len(primes))
    if mode == 'ascending':
        def normalize(part):
            return tuple(sorted(np.prod(group) for group in part))
    elif mode == 'descending':
        def normalize(part):
            return tuple(sorted((np.prod(group) for group in part), reverse=True))
    elif mode == 'mixed':
        def normalize(part):
            prods = sorted(np.prod(group) for group in part)
            half = len(prods) // 2
            return tuple(_roundrobin(prods[:half], prods[half:]))
    else:
        raise ValueError('Wrong mode specified, only {} are available'.format(MODES))
    # Deduplicate partitions that yield the same factor tuple.
    return list({normalize(part) for part in multiset_partitions(primes, d)})
def _factorint2(p):
    # Flat list of the prime factors of `p` with multiplicity,
    # e.g. 12 -> [2, 2, 3] (order follows sympy's factorint dict).
    return _to_list(factorint(p))
def auto_shape(n, d=3, criterion='entropy', mode='ascending'):
    """Pick the d-factor factorization of n that maximizes the criterion.

    'entropy' prefers balanced factors via Shannon entropy; 'var' prefers
    factor tuples with minimal variance.
    """
    candidates = _get_all_factors(n, d=d, mode=mode)
    if criterion == 'entropy':
        scores = [entropy(c) for c in candidates]
    elif criterion == 'var':
        scores = [-np.var(c) for c in candidates]
    else:
        raise ValueError('Wrong criterion specified, only {} are available'.format(CRITERIONS))
    best = np.argmax(scores)
    return list(candidates[best])
def suggest_shape(n, d=3, criterion='entropy', mode='ascending'):
    """Like auto_shape, but allows rounding n up for a better-balanced split.

    Tries rounding n up at every power-of-ten granularity and keeps the
    candidate whose auto_shape scores best under the criterion.
    """
    if criterion == 'entropy':
        def score(factors):
            return entropy(factors)
    elif criterion == 'var':
        def score(factors):
            return -np.var(factors)
    else:
        raise ValueError('Wrong criterion specified, only {} are available'.format(CRITERIONS))
    scores = []
    for k in range(len(str(n))):
        rounded = _roundup(n, k)
        scores.append(score(auto_shape(rounded, d=d, mode=mode, criterion=criterion)))
    best = np.argmax(scores)
    return auto_shape(int(_roundup(n, best)), d=d, mode=mode, criterion=criterion)
def svd_fix(x):
    """SVD of a 2d tensor that always factorizes the tall orientation.

    For a wide matrix the SVD of the transpose is taken and the factors
    swapped back, so in all cases x == u @ diag(s) @ v.t().
    """
    rows, cols = x.shape[0], x.shape[1]
    if rows > cols:
        return torch.svd(x)
    u_t, s, v_t = torch.svd(x.t())
    # svd(x^T) = u_t s v_t^T  =>  x = v_t s u_t^T.
    return v_t, s, u_t
def ind2sub(siz, idx):
    """Convert flat column-major indices into per-dimension subscripts.

    Args:
      siz: sequence of dimension sizes.
      idx: 1d integer torch tensor of flat indices.

    Returns:
      LongTensor of shape (len(idx), len(siz)) with one subscript row per index.
    """
    ndim = len(siz)
    # Column-major strides: [1, siz[0], siz[0]*siz[1], ...].
    strides = np.concatenate((np.ones(1), np.cumprod(siz[:-1])))
    subs = []
    for dim in range(ndim - 1, -1, -1):
        subs.append(torch.floor(idx.float() / strides[dim]).long())
        idx = torch.fmod(idx, strides[dim])
    return torch.stack(subs[::-1], dim=1)
tt-pytorch | tt-pytorch-master/t3nsor/decompositions.py | import numpy as np
import torch
from t3nsor.tensor_train import TensorTrain
from t3nsor.utils import svd_fix
def to_tt_tensor(tens, max_tt_rank=10, epsilon=None):
    """Decompose a dense tensor into TT format via a sweep of truncated SVDs.

    Args:
      tens: torch tensor to decompose.
      max_tt_rank: int or (d+1)-element array bounding the TT-ranks.
      epsilon: accepted for API compatibility but unused here.

    Returns:
      TensorTrain with 3d cores approximating `tens`.
    """
    shape = tens.shape
    d = len(shape)
    max_tt_rank = np.array(max_tt_rank).astype(np.int32)
    if max_tt_rank.size == 1:
        # Scalar bound: apply the same cap to every bond.
        max_tt_rank = [int(max_tt_rank), ] * (d + 1)
    ranks = [1] * (d + 1)
    tt_cores = []
    for core_idx in range(d - 1):
        curr_mode = shape[core_idx]
        # Fold the current mode into the row dimension and SVD the rest off.
        rows = ranks[core_idx] * curr_mode
        tens = tens.view(rows, -1)
        columns = tens.shape[1]
        u, s, v = svd_fix(tens)
        if max_tt_rank[core_idx + 1] == 1:
            ranks[core_idx + 1] = 1
        else:
            # Truncate to the requested rank, capped by the matrix size.
            ranks[core_idx + 1] = min(max_tt_rank[core_idx + 1], rows, columns)
        u = u[:, 0:ranks[core_idx + 1]]
        s = s[0:ranks[core_idx + 1]]
        v = v[:, 0:ranks[core_idx + 1]]
        core_shape = (ranks[core_idx], curr_mode, ranks[core_idx + 1])
        tt_cores.append(u.view(*core_shape))
        # Carry the remaining factor S @ V^T into the next iteration.
        tens = torch.matmul(torch.diag(s), v.permute(1, 0))
    last_mode = shape[-1]
    core_shape = (ranks[d - 1], last_mode, ranks[d])
    tt_cores.append(tens.view(core_shape))
    return TensorTrain(tt_cores, convert_to_tensors=False)
def to_tt_matrix(mat, shape, max_tt_rank=10, epsilon=None):
    """Decompose a dense matrix into TT-matrix format.

    Args:
      mat: 2d torch tensor of size prod(shape[0]) x prod(shape[1]).
      shape: [row_modes, col_modes]; either side may be None for a vector.
      max_tt_rank: int or (d+1)-element array bounding the TT-ranks.
      epsilon: accepted for API compatibility; forwarded to to_tt_tensor.

    Returns:
      TensorTrain with 4d cores (rank, row_mode, col_mode, rank).
    """
    shape = list(shape)
    # In case shape represents a vector, e.g. [None, [2, 2, 2]]
    if shape[0] is None:
        shape[0] = np.ones(len(shape[1])).astype(int)
    # In case shape represents a vector, e.g. [[2, 2, 2], None]
    if shape[1] is None:
        shape[1] = np.ones(len(shape[0])).astype(int)
    shape = np.array(shape)

    def np2int(x):
        # torch.view needs plain Python ints, not numpy scalars.
        return list(map(int, x))

    # Split every matrix axis into its modes: (i1..id, j1..jd).
    tens = mat.view(*np2int(shape.flatten()))
    d = len(shape[0])
    # Interleave row/column modes: transpose_idx = 0, d, 1, d+1, ...
    transpose_idx = np.arange(2 * d).reshape(2, d).T.flatten()
    transpose_idx = np2int(transpose_idx)
    tens = tens.permute(*transpose_idx)
    # Merge each (i_k, j_k) pair into one mode and run the tensor TT sweep.
    new_shape = np2int(np.prod(shape, axis=0))
    tens = tens.contiguous().view(*new_shape)
    tt_tens = to_tt_tensor(tens, max_tt_rank, epsilon)
    # Re-split every 3d core's middle mode back into (row_mode, col_mode).
    tt_cores = []
    tt_ranks = tt_tens.ranks
    for core_idx in range(d):
        curr_core = tt_tens.tt_cores[core_idx]
        curr_rank = tt_ranks[core_idx]
        next_rank = tt_ranks[core_idx + 1]
        curr_core_new_shape = (curr_rank, shape[0, core_idx], shape[1, core_idx], next_rank)
        curr_core = curr_core.view(*curr_core_new_shape)
        tt_cores.append(curr_core)
    return TensorTrain(tt_cores, convert_to_tensors=False)
tt-pytorch | tt-pytorch-master/t3nsor/layers.py | import torch
import numpy as np
import torch.nn as nn
import t3nsor as t3
class TTEmbedding(nn.Module):
    """Embedding layer whose weight matrix is stored in Tensor-Train format.

    The (voc_size x emb_size) embedding matrix is represented as a TT-matrix;
    lookup materializes the full matrix and indexes rows from it.
    """

    def __init__(self,
                 init=None,
                 shape=None,
                 voc_size=None,
                 emb_size=None,
                 auto_shapes=None,
                 auto_shape_mode='ascending',
                 auto_shape_criterion='entropy',
                 d=3,
                 tt_rank=8,
                 batch_dim_last=None,
                 padding_idx=None,
                 naive=False):
        super(TTEmbedding, self).__init__()
        if auto_shapes:
            # Factor the vocabulary / embedding sizes into d modes each.
            voc_quantization = t3.utils.suggest_shape(
                voc_size, d=d, criterion=auto_shape_criterion, mode=auto_shape_mode)
            emb_quantization = t3.utils.auto_shape(
                emb_size, d=d, criterion=auto_shape_criterion, mode=auto_shape_mode)
            shape = [voc_quantization, emb_quantization]
            self.shape = shape
        else:
            self.shape = shape
        if init is None:
            if shape is None:
                raise ValueError('if init is not provided,'
                                 ' please specify shape')
        else:
            # An explicit init overrides any requested shape.
            self.shape = init.raw_shape
        if init is None:
            init = t3.glorot_initializer(self.shape, tt_rank=tt_rank)
        self.tt_matrix = init.to_parameter()
        # NOTE(review): this shadows nn.Module.parameters with a ParameterList —
        # presumably intentional so the TT cores get registered; confirm.
        self.parameters = self.tt_matrix.parameter
        self.batch_dim_last = batch_dim_last
        self.voc_size = int(np.prod(self.shape[0]))
        self.emb_size = int(np.prod(self.shape[1]))
        self.voc_quant = self.shape[0]
        self.emb_quant = self.shape[1]
        self.padding_idx = padding_idx
        self.naive = naive

    def forward(self, x):
        """Look up embeddings for integer index tensor `x` of any shape."""
        xshape = list(x.shape)
        xshape_new = xshape + [self.emb_size, ]
        x = x.view(-1)
        # Materialize the full embedding matrix, then gather rows.
        if self.naive:
            full = t3.naive_full(self.tt_matrix)
        else:
            full = self.tt_matrix.full()
        rows = full[x]
        if self.padding_idx is not None:
            # Zero out rows that correspond to the padding index.
            rows = torch.where(x.view(-1, 1) != self.padding_idx, rows, torch.zeros_like(rows))
        rows = rows.view(*xshape_new)
        return rows.to(x.device)
class TREmbedding(nn.Module):
    """Embedding layer whose weight matrix is stored in tensor-ring format.

    Mirrors TTEmbedding but uses the TR initializer and a TR-matrix weight.
    """

    def __init__(self,
                 init=None,
                 shape=None,
                 voc_size=None,
                 emb_size=None,
                 auto_shapes=None,
                 auto_shape_mode='ascending',
                 auto_shape_criterion='entropy',
                 d=3,
                 tr_rank=8,
                 batch_dim_last=None,
                 padding_idx=None,
                 naive=False):
        super(TREmbedding, self).__init__()
        if auto_shapes:
            # Factor the vocabulary / embedding sizes into d modes each.
            voc_quantization = t3.utils.suggest_shape(
                voc_size, d=d, criterion=auto_shape_criterion, mode=auto_shape_mode)
            emb_quantization = t3.utils.auto_shape(
                emb_size, d=d, criterion=auto_shape_criterion, mode=auto_shape_mode)
            shape = [voc_quantization, emb_quantization]
            self.shape = shape
        else:
            self.shape = shape
        if init is None:
            if shape is None:
                raise ValueError('if init is not provided,'
                                 ' please specify shape')
        else:
            # An explicit init overrides any requested shape.
            self.shape = init.raw_shape
        if init is None:
            init = t3.glorot_initializer_tr(self.shape, tr_rank=tr_rank)
        self.tr_matrix = init.to_parameter()
        # NOTE(review): this shadows nn.Module.parameters with a ParameterList —
        # presumably intentional so the TR cores get registered; confirm.
        self.parameters = self.tr_matrix.parameter
        self.batch_dim_last = batch_dim_last
        self.voc_size = int(np.prod(self.shape[0]))
        self.emb_size = int(np.prod(self.shape[1]))
        self.voc_quant = self.shape[0]
        self.emb_quant = self.shape[1]
        self.padding_idx = padding_idx
        self.naive = naive

    def forward(self, x):
        """Look up embeddings for integer index tensor `x` of any shape."""
        xshape = list(x.shape)
        xshape_new = xshape + [self.emb_size, ]
        x = x.view(-1)
        # Materialize the full embedding matrix, then gather rows.
        if self.naive:
            full = t3.naive_full(self.tr_matrix)
        else:
            full = self.tr_matrix.full()
        rows = full[x]
        if self.padding_idx is not None:
            # Zero out rows that correspond to the padding index.
            rows = torch.where(x.view(-1, 1) != self.padding_idx, rows, torch.zeros_like(rows))
        rows = rows.view(*xshape_new)
        return rows.to(x.device)
FORWARD_MODES = ["auto", "naive", "custom"]


class TTLinear(nn.Module):
    """Fully-connected layer whose weight matrix is stored in TT format.

    forward_mode selects the matmul implementation:
      "auto"   - t3.dense_tt_matmul
      "naive"  - t3.naive_dense_tt_matmul
      "custom" - t3.ops.TTLinearFunction autograd function
    """

    def __init__(self, in_features=None, out_features=None, bias=True, init=None, shape=None,
                 auto_shapes=True, d=3, tt_rank=8, auto_shape_mode='ascending',
                 auto_shape_criterion='entropy', forward_mode="auto"
                 ):
        super(TTLinear, self).__init__()
        # BUG FIX: forward_mode previously defaulted to None, which is not in
        # FORWARD_MODES, so constructing TTLinear without passing forward_mode
        # explicitly always raised ValueError. Default to "auto" instead.
        if forward_mode not in FORWARD_MODES:
            raise ValueError(
                "Only {} are available, got {}".format(FORWARD_MODES, forward_mode)
            )
        if auto_shapes:
            if in_features is None or out_features is None:
                raise ValueError("Shape is not specified")
            # Factor in/out feature counts into d modes each.
            in_quantization = t3.utils.auto_shape(
                in_features, d=d, criterion=auto_shape_criterion, mode=auto_shape_mode)
            out_quantization = t3.utils.auto_shape(
                out_features, d=d, criterion=auto_shape_criterion, mode=auto_shape_mode)
            shape = [in_quantization, out_quantization]
        if init is None:
            if shape is None:
                raise ValueError(
                    "if init is not provided, please specify shape, or set auto_shapes=True")
        else:
            # An explicit init overrides any requested shape.
            shape = init.raw_shape
        if init is None:
            init = t3.glorot_initializer(shape, tt_rank=tt_rank)
        self.shape = shape
        self.weight = init.to_parameter()
        # NOTE(review): shadows nn.Module.parameters with a ParameterList;
        # presumably intentional so the TT cores get registered — confirm.
        self.parameters = self.weight.parameter
        if forward_mode == "naive":
            self.mm_op = t3.naive_dense_tt_matmul
        elif forward_mode == "auto":
            self.mm_op = t3.dense_tt_matmul
        elif forward_mode == "custom":
            self.fun = t3.ops.TTLinearFunction(shape=self.shape)

            def helper(x, weight):
                cores = weight.tt_cores
                return self.fun.apply(x, *cores)
            self.mm_op = helper
        else:
            # Unreachable: forward_mode was validated above.
            raise NotImplementedError()
        if bias:
            self.bias = torch.nn.Parameter(1e-3 * torch.ones(out_features))
        else:
            self.register_parameter('bias', None)

    def forward(self, x):
        """Apply y = x W (+ bias) using the TT-factored weight."""
        weight = self.weight
        if self.bias is None:
            return self.mm_op(x, weight)
        else:
            return self.mm_op(x, weight) + self.bias
class TRLinear(nn.Module):
    """Fully-connected layer with the weight matrix kept in tensor-ring format.

    NOTE(review): with the default naive=False the constructor always raises
    ValueError('Not implemented, use naive option.'), so callers must pass
    naive=True. Also the rank argument is named `tt_rank` here but feeds
    `tr_rank` of the TR initializer — presumably a naming slip; confirm.
    """

    def __init__(self, in_features=None, out_features=None, bias=True, init=None, shape=None,
                 auto_shapes=True, d=3, tt_rank=8, auto_shape_mode='ascending',
                 auto_shape_criterion='entropy', naive=False
                 ):
        super(TRLinear, self).__init__()
        if auto_shapes:
            if in_features is None or out_features is None:
                raise ValueError("Shape is not specified")
            # Factor in/out feature counts into d modes each.
            in_quantization = t3.utils.auto_shape(
                in_features, d=d, criterion=auto_shape_criterion, mode=auto_shape_mode)
            out_quantization = t3.utils.auto_shape(
                out_features, d=d, criterion=auto_shape_criterion, mode=auto_shape_mode)
            shape = [in_quantization, out_quantization]
        if init is None:
            if shape is None:
                raise ValueError(
                    "if init is not provided, please specify shape, or set auto_shapes=True")
        else:
            # An explicit init overrides any requested shape.
            shape = init.raw_shape
        if init is None:
            init = t3.glorot_initializer_tr(shape, tr_rank=tt_rank)
        self.shape = shape
        self.weight = init.to_parameter()
        # NOTE(review): shadows nn.Module.parameters with a ParameterList;
        # presumably intentional so the TR cores get registered — confirm.
        self.parameters = self.weight.parameter
        if naive:
            self.mm_op = t3.naive_dense_tr_matmul
        else:
            raise ValueError('Not implemented, use naive option.')
        if bias:
            self.bias = torch.nn.Parameter(1e-3 * torch.ones(out_features))
        else:
            self.register_parameter('bias', None)

    def forward(self, x):
        """Apply y = x W (+ bias) using the TR-factored weight."""
        weight = self.weight
        if self.bias is None:
            return self.mm_op(x, weight)
        else:
            return self.mm_op(x, weight) + self.bias
tt-pytorch | tt-pytorch-master/t3nsor/tensor_train.py | import torch
import numpy as np
import torch.nn as nn
class TensorTrain(object):
def __init__(self, tt_cores, shape=None, tt_ranks=None, convert_to_tensors=True):
#tt_cores = list(tt_cores)
if convert_to_tensors:
for i in range(len(tt_cores)):
tt_cores[i] = torch.Tensor(tt_cores[i])
self._tt_cores = tt_cores
if len(self._tt_cores[0].shape) == 4:
self._is_tt_matrix = True
else:
self._is_tt_matrix = False
if self._is_tt_matrix:
self._raw_shape = [[tt_core.shape[1] for tt_core in self._tt_cores],
[tt_core.shape[2] for tt_core in self._tt_cores]]
self._shape = [int(np.prod(self._raw_shape[0])), int(np.prod(self._raw_shape[1]))]
self._ndims = len(self._raw_shape[0])
else:
self._raw_shape = [tt_core.shape[1] for tt_core in self._tt_cores]
self._shape = [tt_core.shape[1] for tt_core in self._tt_cores]
self._ndims = len(self._raw_shape)
self._ranks = [tt_core.shape[0] for tt_core in self._tt_cores] + [1, ]
self._is_parameter = False
self._parameter = None
self._dof = np.sum([np.prod(list(tt_core.shape)) for tt_core in self._tt_cores])
self._total = np.prod(self._shape)
@property
def tt_cores(self):
"""A list of TT-cores.
Returns:
A list of 3d or 4d tensors of shape
"""
return self._tt_cores
@property
def cores(self):
"""A list of TT-cores.
Returns:
A list of 3d or 4d tensors of shape
"""
return self._tt_cores
@property
def raw_shape(self):
return self._raw_shape
@property
def is_tt_matrix(self):
return self._is_tt_matrix
@property
def shape(self):
return self._shape
@property
def ranks(self):
return self._ranks
@property
def ndims(self):
return self._ndims
@property
def is_parameter(self):
return self._is_parameter
@property
def parameter(self):
if self.is_parameter:
return self._parameter
else:
raise ValueError('Not a parameter, run .to_parameter() first')
@property
def dof(self):
return self._dof
@property
def total(self):
return self._total
def to(self, device):
new_cores = []
for core in self.tt_cores:
new_cores.append(core.to(device))
return TensorTrain(new_cores, convert_to_tensors=False)
def detach(self):
new_cores = []
for core in self.tt_cores:
new_cores.append(core.detach())
return TensorTrain(new_cores, convert_to_tensors=False)
def requires_grad_(self, requires_grad=True):
new_cores = []
for core in self.tt_cores:
new_cores.append(core.requires_grad_(requires_grad))
return TensorTrain(new_cores, convert_to_tensors=False)
def to_parameter(self):
new_cores = []
for core in self.tt_cores:
core = nn.Parameter(core)
core.is_tt = True
new_cores.append(core)
tt_p = TensorTrain(new_cores, convert_to_tensors=False)
tt_p._parameter = nn.ParameterList(tt_p.tt_cores)
tt_p._is_parameter = True
return tt_p
    def full(self):
        """Contract all TT-cores into the equivalent dense tensor.

        Sweeps left-to-right, merging one core at a time via a matrix product,
        then (for TT-matrices) interleaves/permutes the row and column modes
        so the final view matches ``self.shape``.
        """
        num_dims = self.ndims
        ranks = self.ranks
        shape = self.shape
        raw_shape = self.raw_shape
        res = self.tt_cores[0]
        for i in range(1, num_dims):
            # Fold everything absorbed so far into the row index, leaving the
            # current bond rank as the column, then contract with core i.
            res = res.view(-1, ranks[i])
            curr_core = self.tt_cores[i].view(ranks[i], -1)
            res = torch.matmul(res, curr_core)
        if self.is_tt_matrix:
            # Modes come out interleaved (m0, n0, m1, n1, ...); build that
            # shape, then permute to (m0, m1, ..., n0, n1, ...).
            intermediate_shape = []
            for i in range(num_dims):
                intermediate_shape.append(raw_shape[0][i])
                intermediate_shape.append(raw_shape[1][i])
            res = res.view(*intermediate_shape)
            transpose = []
            for i in range(0, 2 * num_dims, 2):
                transpose.append(i)
            for i in range(1, 2 * num_dims, 2):
                transpose.append(i)
            res = res.permute(*transpose)
        if self.is_tt_matrix:
            # permute() produces a non-contiguous tensor, hence contiguous().
            res = res.contiguous().view(*shape)
        else:
            res = res.view(*shape)
        return res
def __str__(self):
"""A string describing the TensorTrain object, its TT-rank, and shape."""
shape = self.shape
tt_ranks = self.ranks
device = self.tt_cores[0].device
compression_rate = self.total / self.dof
if self.is_tt_matrix:
raw_shape = self.raw_shape
return "A TT-Matrix of size %d x %d, underlying tensor" \
"shape: %s x %s, TT-ranks: %s " \
"\n on device '%s' with compression rate %.2f" % (shape[0], shape[1],
raw_shape[0], raw_shape[1],
tt_ranks, device, compression_rate)
else:
return "A Tensor Train of shape %s, TT-ranks: %s" \
"\n on device '%s' with compression rate %.2f" % (shape, tt_ranks, device, compression_rate)
class TensorTrainBatch():
    """A batch of tensors stored jointly in Tensor Train (TT) format.

    Each core carries a leading batch axis: 4d cores
    (batch, rank, mode, rank) represent a batch of TT-tensors, 5d cores
    (batch, rank, row_mode, col_mode, rank) a batch of TT-matrices.
    Shapes and ranks are always derived from the cores themselves.
    """

    def __init__(self, tt_cores, shape=None, tt_ranks=None, convert_to_tensors=True):
        #tt_cores = list(tt_cores)
        # `shape` / `tt_ranks` are accepted only for API compatibility;
        # everything below is re-derived from the cores.
        if convert_to_tensors:
            for i in range(len(tt_cores)):
                tt_cores[i] = torch.Tensor(tt_cores[i])
        self._tt_cores = tt_cores
        self._batch_size = self._tt_cores[0].shape[0]
        # 5d cores <=> batch of TT-matrices; 4d cores <=> batch of TT-tensors.
        if len(self._tt_cores[0].shape) == 5:
            self._is_tt_matrix = True
        else:
            self._is_tt_matrix = False
        if self._is_tt_matrix:
            self._raw_shape = [[tt_core.shape[2] for tt_core in self._tt_cores],
                               [tt_core.shape[3] for tt_core in self._tt_cores]]
            self._shape = [self._batch_size, int(
                np.prod(self._raw_shape[0])), int(np.prod(self._raw_shape[1]))]
            self._ndims = len(self._raw_shape[0])
        else:
            self._raw_shape = [tt_core.shape[2] for tt_core in self._tt_cores]
            self._shape = [self._batch_size, ] + [tt_core.shape[2] for tt_core in self._tt_cores]
            self._ndims = len(self._raw_shape)
        # TT-ranks per core plus a trailing 1 closing the train.
        self._ranks = [tt_core.shape[1] for tt_core in self._tt_cores] + [1, ]

    @property
    def tt_cores(self):
        """A list of TT-cores.
        Returns:
            A list of 4d or 5d tensors (leading axis is the batch).
        """
        return self._tt_cores

    @property
    def raw_shape(self):
        """Per-core mode sizes (flat list, or [rows, cols] for matrices)."""
        return self._raw_shape

    @property
    def is_tt_matrix(self):
        """True when this is a batch of TT-matrices (5d cores)."""
        return self._is_tt_matrix

    @property
    def shape(self):
        """Dense shape including the leading batch dimension."""
        return self._shape

    @property
    def ranks(self):
        """TT-ranks, one per core plus a trailing 1."""
        return self._ranks

    @property
    def ndims(self):
        """Number of TT cores."""
        return self._ndims

    @property
    def batch_size(self):
        """Size of the leading batch axis shared by all cores."""
        return self._batch_size

    def to(self, device):
        """Return a new batch with every core moved to *device*."""
        new_cores = []
        for core in self.tt_cores:
            new_cores.append(core.to(device))
        return TensorTrainBatch(new_cores, convert_to_tensors=False)

    def detach(self):
        """Return a new batch with every core detached from autograd."""
        new_cores = []
        for core in self.tt_cores:
            new_cores.append(core.detach())
        return TensorTrainBatch(new_cores, convert_to_tensors=False)

    def requires_grad_(self, requires_grad=True):
        """Set requires_grad on each core (in place) and rewrap."""
        new_cores = []
        for core in self.tt_cores:
            new_cores.append(core.requires_grad_(requires_grad))
        return TensorTrainBatch(new_cores, convert_to_tensors=False)

    def full(self):
        """Contract all cores into the dense (batch, ...) tensor."""
        num_dims = self.ndims
        ranks = self.ranks
        shape = self.shape
        raw_shape = self.raw_shape
        res = self.tt_cores[0]
        batch_size = self.batch_size
        for i in range(1, num_dims):
            # Batched matmul over the shared bond rank via einsum.
            res = res.view(batch_size, -1, ranks[i])
            curr_core = self.tt_cores[i].view(batch_size, ranks[i], -1)
            res = torch.einsum('oqb,obw->oqw', (res, curr_core))
        if self.is_tt_matrix:
            # Interleaved (m0, n0, m1, n1, ...) -> (m0, m1, ..., n0, n1, ...),
            # keeping the batch axis in front.
            intermediate_shape = [batch_size]
            for i in range(num_dims):
                intermediate_shape.append(raw_shape[0][i])
                intermediate_shape.append(raw_shape[1][i])
            res = res.view(*intermediate_shape)
            transpose = [0]
            for i in range(0, 2 * num_dims, 2):
                transpose.append(i + 1)
            for i in range(1, 2 * num_dims, 2):
                transpose.append(i + 1)
            res = res.permute(transpose)
        if self.is_tt_matrix:
            res = res.contiguous().view(*shape)
        else:
            res = res.view(*shape)
        return res

    def __str__(self):
        """A string describing the TensorTrainBatch, its TT-rank and shape."""
        shape = self.shape
        tt_ranks = self.ranks
        batch_size_str = str(self.batch_size)
        device = self.tt_cores[0].device
        if self.is_tt_matrix:
            raw_shape = self.raw_shape
            type_str = 'TT-matrices'
            # Fix: the original implicit concatenation lacked a separator and
            # rendered "...TT-ranks: %son device ...".
            return "A %s element batch of %s of size %d x %d, underlying tensor " \
                   "shape: %s x %s, TT-ranks: %s" \
                   "\n on device '%s' " % (batch_size_str, type_str,
                                           shape[1], shape[2],
                                           raw_shape[0], raw_shape[1],
                                           tt_ranks, device)
        else:
            type_str = 'Tensor Trains'
            return "A %s element batch of %s of shape %s, TT-ranks: %s \n on device '%s'" % \
                   (batch_size_str, type_str, shape[1:], tt_ranks, device)
| 9,783 | 30.869707 | 111 | py |
tt-pytorch | tt-pytorch-master/t3nsor/ops.py | from t3nsor import TensorTrainBatch
from t3nsor import TensorTrain
from torch.autograd import Function
import torch
def gather_rows(tt_mat, inds):
    """
    inds -- list of indices of shape batch_size x d
    d = len(tt_mat.raw_shape[1])

    Gathers, for each batch element, the row of the dense matrix represented
    by `tt_mat` addressed by the d-digit multi-index `inds[b]`, without
    densifying the whole matrix. Returns a tensor of shape
    (batch_size, num_columns).
    """
    cores = tt_mat.cores
    slices = []  # NOTE(review): unused — presumably leftover; verify before removing.
    batch_size = int(inds.shape[0])
    # Bond ranks, read off as each core's leading axis, plus a trailing 1.
    ranks = [int(cores.shape[0]) for cores in tt_mat.cores] + [1, ]
    for k, core in enumerate(cores):
        i = inds[:, k]
        # Select one row-mode slice per batch element from this core.
        cur_slice = torch.index_select(core, 1, i)
        # r x B x M x r
        if k == 0:
            res = cur_slice.transpose(0, 1)
            # B x r x M x r
        else:
            res = res.contiguous().view(batch_size, -1, ranks[k])
            # B x rM x r
            curr_core = cur_slice.view(ranks[k], batch_size, -1)
            # r x B x Mr
            res = torch.einsum('oqb,bow->oqw', (res, curr_core))
    # Close the train: trace out the leading/trailing rank axes.
    res = torch.einsum('i...i->...', res.view(batch_size, ranks[0], res.shape[1] // ranks[0], -1, ranks[0]).transpose(0, 1))
    return res
def transpose(tt_matrix):
    """Return the transpose of a TT-matrix by swapping each core's mode axes."""
    swapped = [core.transpose(1, 2) for core in tt_matrix.tt_cores]
    return TensorTrain(swapped)
def transpose_cores(tt_cores):
    """Swap axes 1 and 2 (row/column modes) of every core in the list."""
    return [core.transpose(1, 2) for core in tt_cores]
def tt_dense_matmul(tt_matrix_a, matrix_b):
    """Multiplies a TT-matrix by a regular matrix, returns a regular matrix.
    Args:
        tt_matrix_a: `TensorTrain` object containing a TT-matrix of size M x N
        matrix_b: torch.Tensor of size N x P
    Returns
        torch.Tensor of size M x P
    """
    ndims = tt_matrix_a.ndims
    a_columns = tt_matrix_a.shape[1]
    b_rows = matrix_b.shape[0]
    if a_columns is not None and b_rows is not None:
        if a_columns != b_rows:
            # NOTE(review): %d with shape sequences would itself raise at
            # format time — verify the intended message formatting.
            raise ValueError('Arguments shapes should align got %d and %d instead.' %
                             (tt_matrix_a.shape, matrix_b.shape))
    a_shape = tt_matrix_a.shape
    a_raw_shape = tt_matrix_a.raw_shape
    b_shape = matrix_b.shape
    a_ranks = tt_matrix_a.ranks
    # If A is (i0, ..., id-1) x (j0, ..., jd-1) and B is (j0, ..., jd-1) x K,
    # data is (K, j0, ..., jd-2) x jd-1 x 1
    data = matrix_b.transpose(0, 1)
    data = data.view(-1, a_raw_shape[1][-1], 1)
    # Sweep the cores right-to-left, absorbing one column mode per step.
    for core_idx in reversed(range(ndims)):
        curr_core = tt_matrix_a.tt_cores[core_idx]
        # On the k = core_idx iteration, after applying einsum the shape of data
        # becomes ik x (ik-1..., id-1, K, j0, ..., jk-1) x rank_k
        data = torch.einsum('aijb,rjb->ira', curr_core, data)
        if core_idx > 0:
            # After reshape the shape of data becomes
            # (ik, ..., id-1, K, j0, ..., jk-2) x jk-1 x rank_k
            new_data_shape = (-1, a_raw_shape[1][core_idx - 1], a_ranks[core_idx])
            data = data.contiguous().view(new_data_shape)
    # At the end the shape of the data is (i0, ..., id-1) x K
    return data.view(a_shape[0], b_shape[1])
def dense_tt_matmul(matrix_a, tt_matrix_b):
    """Multiply a dense matrix (M x N) by a TT-matrix (N x P) -> dense (M x P).

    Reshapes the dense input to expose the TT-matrix's row modes, then
    contracts one core at a time with tensordot.
    """
    ndims = tt_matrix_b.ndims
    a_columns = matrix_a.shape[1]
    b_rows = tt_matrix_b.shape[0]
    if a_columns is not None and b_rows is not None:
        if a_columns != b_rows:
            raise ValueError('Arguments shapes should align got %d and %d instead.' %
                             (matrix_a.shape, tt_matrix_b.shape))
    a_shape = matrix_a.shape
    b_shape = tt_matrix_b.shape
    b_raw_shape = tt_matrix_b.raw_shape
    data = matrix_a
    # (batch, n0, n1, ..., nd-1, 1): trailing 1 is the initial bond rank.
    new_shape = [-1, ] + b_raw_shape[0] + [1, ]
    data = data.view(*new_shape)
    for core_idx in range(ndims):
        curr_core = tt_matrix_b.tt_cores[core_idx]
        # Contract the current row mode (axis 1) and the bond rank (last axis).
        data = torch.tensordot(data, curr_core, dims=[[1, -1], [1, 0]])
    return data.view(a_shape[0], b_shape[1])
def naive_dense_tt_matmul(matrix_a, tt_matrix_b):
    """Dense x TT-matrix product for the 3-core case, via full contraction.

    Densifies the three cores into one 6-axis tensor first — simple but
    memory-hungry; intended as a reference implementation.
    """
    ndims = tt_matrix_b.ndims
    a_columns = matrix_a.shape[1]
    b_rows = tt_matrix_b.shape[0]
    if a_columns is not None and b_rows is not None and a_columns != b_rows:
        raise ValueError('Arguments shapes should align got %d and %d instead.' %
                         (matrix_a.shape, tt_matrix_b.shape))
    assert ndims == 3
    first = tt_matrix_b.tt_cores[0]   # 1 x n x m x r
    second = tt_matrix_b.tt_cores[1]  # r x n x m x r
    third = tt_matrix_b.tt_cores[2]   # r x n x m x 1
    reshaped = matrix_a.view(-1, first.shape[1], second.shape[1], third.shape[1])
    batch = reshaped.shape[0]
    dense_b = torch.einsum('abcd,defg,ghij->bcefhi', first, second, third)
    prod = torch.einsum('abcd,bqcsdx->aqsx', reshaped, dense_b)
    return prod.contiguous().view(batch, -1)
def _naive_dense_tt_cores(matrix_a, tt_cores):
core0 = tt_cores[0] # 1 x n x m x r
core1 = tt_cores[1] # r x n x m x r
core2 = tt_cores[2] # r x n x m x 1
input = matrix_a.view(-1, core0.shape[1], core1.shape[1], core2.shape[1])
B = input.shape[0]
full = torch.einsum('abcd,defg,ghij->bcefhi', core0, core1, core2)
res = torch.einsum('abcd,bqcsdx->aqsx', input, full)
return res.contiguous().view(B, -1)
def naive_full(tt_a):
    """Densify a 3-core TT- or TR-embedding into its full 2d matrix.

    Works for both TT objects (``tt_cores`` attribute) and TR objects
    (``tr_cores`` attribute). The ring index 'a' in the einsum ties the
    first core's leading axis to the last core's trailing axis (for TT
    that axis has size 1, so this reduces to the usual TT contraction).
    """
    ndims = tt_a.ndims
    assert ndims == 3
    try:
        # TT-Embedding
        core0, core1, core2 = tt_a.tt_cores
    except AttributeError:
        # TR-Embedding exposes its cores under `tr_cores`.
        # (Fix: was a bare `except:`, which also swallowed unrelated errors
        # such as KeyboardInterrupt or a wrong number of cores.)
        core0, core1, core2 = tt_a.tr_cores
    full = torch.einsum('abcd,defg,ghia->bcefhi', core0, core1, core2)
    full = full.reshape(tt_a.shape[0], tt_a.shape[1])
    return full
def naive_dense_tr_matmul(matrix_a, tr_matrix_b):
    """Dense x tensor-ring matrix product for the 3-core case (reference).

    Same strategy as naive_dense_tt_matmul, but the einsum closes the ring
    by sharing index 'a' between the first and last cores.
    """
    ndims = tr_matrix_b.ndims
    a_columns = matrix_a.shape[1]
    b_rows = tr_matrix_b.shape[0]
    if a_columns is not None and b_rows is not None and a_columns != b_rows:
        raise ValueError('Arguments shapes should align got %d and %d instead.' %
                         (matrix_a.shape, tr_matrix_b.shape))
    assert ndims == 3
    first = tr_matrix_b.tr_cores[0]   # 1 x n x m x r
    second = tr_matrix_b.tr_cores[1]  # r x n x m x r
    third = tr_matrix_b.tr_cores[2]   # r x n x m x 1
    reshaped = matrix_a.view(-1, first.shape[1], second.shape[1], third.shape[1])
    batch = reshaped.shape[0]
    dense_b = torch.einsum('abcd,defg,ghia->bcefhi', first, second, third)
    prod = torch.einsum('abcd,bqcsdx->aqsx', reshaped, dense_b)
    return prod.contiguous().view(batch, -1)
class TTLinearFunction(Function):
    """Multiplies a TT-matrix (weights) by a regular matrix (input), returns a regular matrix.
    Args:
        weight: `TensorTrain` object containing a TT-matrix of size M x N
        input: torch.Tensor of size N x P
    Returns
        torch.Tensor of size M x P

    Custom autograd Function: forward uses the naive dense contraction of
    the three TT cores; backward derives analytic gradients for the input
    and each core from dY/dT = input^T @ grad_output.
    """
    @staticmethod
    def _dtdc0(dydt_reshaped, cores):
        # Gradient w.r.t. core 0 (shape 1 x m1 x n1 x r1): contract
        # dY/dT with cores 1 and 2.
        return torch.einsum('ijkabc,xjbt,tkcs->siax', dydt_reshaped, cores[1], cores[2])
    @staticmethod
    def _dtdc1(dydt_reshaped, cores):
        # dydt_reshape (m1 x m2 x m3) x (n1 x n2 x n3)
        # output: r1 x m2 x n2 x r2
        # core0: 1 x m1 x n1 x r1
        # core2: r2 x m3 x n3 x 1
        return torch.einsum('ijkabc,xiat,pkcx->tjbp', dydt_reshaped, cores[0], cores[2])
    @staticmethod
    def _dtdc2(dydt_reshaped, cores):
        # dydt_reshape (m1 x m2 x m3) x (n1 x n2 x n3)
        # output: r2 x m3 x n3 x 1
        # core0: 1 x m1 x n1 x r1
        # core1: r1 x m2 x n2 x r2
        return torch.einsum('ijkabc,xiat,tjbs->skcx', dydt_reshaped, cores[0], cores[1])
    @staticmethod
    def forward(ctx, input, *tt_cores):
        # Stash input and cores for the backward pass.
        ctx.save_for_backward(input, *tt_cores)
        data = _naive_dense_tt_cores(input, tt_cores)
        return data
    @staticmethod
    def backward(ctx, grad_output):
        inp, *tt_cores = ctx.saved_tensors
        grad_input = grad_c0 = grad_c1 = grad_c2 = None
        # dL/dinput = grad_output @ W^T, computed in TT form via transposed cores.
        grad_input = _naive_dense_tt_cores(grad_output, transpose_cores(tt_cores))
        # dL/dW as a dense (M x N) matrix, reshaped into the six TT modes.
        dydt = inp.t() @ grad_output
        shape = [core.shape[1] for core in tt_cores] + [core.shape[2] for core in tt_cores]
        dydt_reshaped = dydt.view(*shape)
        grad_c0 = TTLinearFunction._dtdc0(dydt_reshaped, tt_cores)
        grad_c1 = TTLinearFunction._dtdc1(dydt_reshaped, tt_cores)
        grad_c2 = TTLinearFunction._dtdc2(dydt_reshaped, tt_cores)
        return grad_input, grad_c0, grad_c1, grad_c2
| 8,157 | 32.850622 | 124 | py |
tt-pytorch | tt-pytorch-master/t3nsor/tensor_ring.py | import torch
import numpy as np
import torch.nn as nn
import t3nsor as t3
class TensorRing(object):
    """A tensor stored in Tensor Ring (TR) format.

    Like a Tensor Train, but the first and last bond ranks are tied into a
    ring, which `full()` closes with a trace. 4d cores (rank, row, col, rank)
    represent a TR-matrix; 3d cores a TR-tensor. Shapes/ranks are derived
    from the cores.
    """
    def __init__(self, tr_cores, shape=None, tr_ranks=None, convert_to_tensors=True):
        #tr_cores = list(tr_cores)
        # `shape` / `tr_ranks` are accepted only for API compatibility.
        if convert_to_tensors:
            for i in range(len(tr_cores)):
                tr_cores[i] = torch.Tensor(tr_cores[i])
        self._tr_cores = tr_cores
        if len(self._tr_cores[0].shape) == 4:
            self._is_tr_matrix = True
        else:
            self._is_tr_matrix = False
        if self._is_tr_matrix:
            self._raw_shape = [[tr_core.shape[1] for tr_core in self._tr_cores],
                               [tr_core.shape[2] for tr_core in self._tr_cores]]
            self._shape = [int(np.prod(self._raw_shape[0])), int(np.prod(self._raw_shape[1]))]
            self._ndims = len(self._raw_shape[0])
        else:
            self._raw_shape = [tr_core.shape[1] for tr_core in self._tr_cores]
            self._shape = [tr_core.shape[1] for tr_core in self._tr_cores]
            self._ndims = len(self._raw_shape)
        # NOTE(review): trailing 1 mirrors the TT convention even though a
        # ring's closing rank need not be 1 — verify against callers.
        self._ranks = [tr_core.shape[0] for tr_core in self._tr_cores] + [1, ]
        self._is_parameter = False
        self._parameter = None
        self._dof = np.sum([np.prod(list(tr_core.shape)) for tr_core in self._tr_cores])
        self._total = np.prod(self._shape)
    @property
    def tr_cores(self):
        """A list of TR-cores.
        Returns:
            The underlying list of 3d or 4d torch.Tensors.
        """
        return self._tr_cores
    @property
    def cores(self):
        """A list of TR-cores.
        Returns:
            Same list as ``tr_cores``; alias for API compatibility.
        """
        return self._tr_cores
    @property
    def raw_shape(self):
        """Per-core mode sizes (flat list, or [rows, cols] for matrices)."""
        return self._raw_shape
    @property
    def is_tr_matrix(self):
        """True when cores are 4d, i.e. this represents a TR-matrix."""
        return self._is_tr_matrix
    @property
    def shape(self):
        """Dense shape of the represented tensor/matrix."""
        return self._shape
    @property
    def ranks(self):
        """TR-ranks per core, plus a trailing 1 (see NOTE in __init__)."""
        return self._ranks
    @property
    def ndims(self):
        """Number of TR cores."""
        return self._ndims
    @property
    def is_parameter(self):
        """True once ``to_parameter()`` has wrapped the cores."""
        return self._is_parameter
    @property
    def parameter(self):
        """nn.ParameterList of cores; only valid after ``to_parameter()``."""
        if self.is_parameter:
            return self._parameter
        else:
            raise ValueError('Not a parameter, run .to_parameter() first')
    @property
    def dof(self):
        """Degrees of freedom: total scalar entries across all cores."""
        return self._dof
    @property
    def total(self):
        """Number of entries of the equivalent dense tensor."""
        return self._total
    def full(self):
        """Contract all cores (closing the ring with a trace) into a dense tensor."""
        num_dims = self._ndims
        ranks = self._ranks
        shape = self._shape
        raw_shape = self._raw_shape
        res = self.tr_cores[0]
        for core_idx in range(1, num_dims):
            curr_core = self.tr_cores[core_idx]
            # print('loop', core_idx, curr_core.shape)
            res = torch.tensordot(res, curr_core, dims=[[-1], [0]])
        res = torch.einsum('i...i->...', res)  # trace
        # print(res.shape)
        if self.is_tr_matrix:
            # Reorder interleaved (m0, n0, m1, n1, ...) to (m..., n...).
            transpose = []
            for i in range(0, 2 * num_dims, 2):
                transpose.append(i)
            for i in range(1, 2 * num_dims, 2):
                transpose.append(i)
            res = res.permute(*transpose)
            # print(transpose)
        if self.is_tr_matrix:
            res = res.contiguous().view(*shape)
        else:
            res = res.view(*shape)
        return res
    def to_parameter(self):
        """Wrap each core in an nn.Parameter (tagged ``is_tr``) and return a trainable TensorRing."""
        new_cores = []
        for core in self.tr_cores:
            core = nn.Parameter(core)
            core.is_tr = True
            new_cores.append(core)
        tr_p = t3.TensorRing(new_cores, convert_to_tensors=False)
        tr_p._parameter = nn.ParameterList(tr_p.tr_cores)
        tr_p._is_parameter = True
        return tr_p
| 3,719 | 26.969925 | 94 | py |
FinQA | FinQA-main/code/generator/Test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main script
"""
from tqdm import tqdm
import json
import os
from datetime import datetime
import time
import logging
from utils import *
from config import parameters as conf
from torch import nn
import torch
import torch.optim as optim
from Model_new import Bert_model
# --- Tokenizer / config selection ------------------------------------------
# Pick the HuggingFace tokenizer and model config that match the configured
# pretrained backbone. Imports are deliberately local to each branch so only
# the needed transformers classes are loaded.
if conf.pretrained_model == "bert":
    print("Using bert")
    from transformers import BertTokenizer
    from transformers import BertConfig
    tokenizer = BertTokenizer.from_pretrained(conf.model_size)
    model_config = BertConfig.from_pretrained(conf.model_size)
elif conf.pretrained_model == "roberta":
    print("Using roberta")
    from transformers import RobertaTokenizer
    from transformers import RobertaConfig
    tokenizer = RobertaTokenizer.from_pretrained(conf.model_size)
    model_config = RobertaConfig.from_pretrained(conf.model_size)
elif conf.pretrained_model == "finbert":
    print("Using finbert")
    from transformers import BertTokenizer
    from transformers import BertConfig
    # FinBERT shares BERT's vocabulary; the weights come from conf.model_size.
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model_config = BertConfig.from_pretrained(conf.model_size)
elif conf.pretrained_model == "longformer":
    print("Using longformer")
    from transformers import LongformerTokenizer, LongformerConfig
    tokenizer = LongformerTokenizer.from_pretrained(conf.model_size)
    model_config = LongformerConfig.from_pretrained(conf.model_size)
# --- Output directories -----------------------------------------------------
# A fresh timestamped inference directory; makedirs(exist_ok=False) guards
# against accidentally reusing a previous run's folder.
saved_model_path = os.path.join(conf.output_path, conf.saved_model_path)
# model_dir_name = datetime.now().strftime("%Y%m%d%H%M%S")
model_dir_name = datetime.now().strftime("%Y%m%d%H%M%S") + \
    "_" + conf.model_save_name
model_dir = os.path.join(
    conf.output_path, 'inference_only_' + model_dir_name)
results_path = os.path.join(model_dir, "results")
os.makedirs(results_path, exist_ok=False)
log_file = os.path.join(results_path, 'log.txt')
# --- Program vocabulary -----------------------------------------------------
# Operators become call tokens ("add("), prefixed by the special tokens;
# constants are normalized (lowercase, '.' -> '_') to match program text.
op_list = read_txt(conf.op_list_file, log_file)
op_list = [op + '(' for op in op_list]
op_list = ['EOF', 'UNK', 'GO', ')'] + op_list
const_list = read_txt(conf.const_list_file, log_file)
const_list = [const.lower().replace('.', '_') for const in const_list]
reserved_token_size = len(op_list) + len(const_list)
# --- Test data --------------------------------------------------------------
test_data, test_examples, op_list, const_list = \
    read_examples(input_path=conf.test_file, tokenizer=tokenizer,
                  op_list=op_list, const_list=const_list, log_file=log_file)
print(const_list)
print(op_list)
kwargs = {"examples": test_examples,
          "tokenizer": tokenizer,
          "max_seq_length": conf.max_seq_length,
          "max_program_length": conf.max_program_length,
          "is_training": False,
          "op_list": op_list,
          "op_list_size": len(op_list),
          "const_list": const_list,
          "const_list_size": len(const_list),
          "verbose": True}
test_features = convert_examples_to_features(**kwargs)
def generate(data_ori, data, model, ksave_dir, mode='valid'):
    """Decode programs for every feature in *data* and write n-best predictions.

    Args:
        data_ori: the original examples (used for id/program bookkeeping).
        data: the converted InputFeatures fed to the model.
        model: the (already loaded, eval-mode) Bert_model.
        ksave_dir: directory under which a `<mode>/` results folder is created.
        mode: sub-directory name for the outputs ('valid' or 'test').

    Side effects: writes predictions.json and nbest_predictions.json under
    ksave_dir/<mode>/.
    """
    pred_list = []   # NOTE(review): unused here — presumably legacy; verify.
    pred_unk = []    # NOTE(review): unused here.
    ksave_dir_mode = os.path.join(ksave_dir, mode)
    os.makedirs(ksave_dir_mode, exist_ok=True)
    data_iterator = DataLoader(
        is_training=False, data=data, batch_size=conf.batch_size_test, reserved_token_size=reserved_token_size, shuffle=False)
    k = 0
    all_results = []
    with torch.no_grad():
        for x in tqdm(data_iterator):
            input_ids = x['input_ids']
            input_mask = x['input_mask']
            segment_ids = x['segment_ids']
            program_ids = x['program_ids']
            program_mask = x['program_mask']
            option_mask = x['option_mask']
            # Pad the final (possibly short) batch up to batch_size_test with
            # all-zero rows so the DataParallel forward sees a fixed size.
            ori_len = len(input_ids)
            for each_item in [input_ids, input_mask, segment_ids, program_ids, program_mask, option_mask]:
                if ori_len < conf.batch_size_test:
                    each_len = len(each_item[0])
                    pad_x = [0] * each_len
                    each_item += [pad_x] * (conf.batch_size_test - ori_len)
            input_ids = torch.tensor(input_ids).to(conf.device)
            input_mask = torch.tensor(input_mask).to(conf.device)
            segment_ids = torch.tensor(segment_ids).to(conf.device)
            program_ids = torch.tensor(program_ids).to(conf.device)
            program_mask = torch.tensor(program_mask).to(conf.device)
            option_mask = torch.tensor(option_mask).to(conf.device)
            logits = model(False, input_ids, input_mask,
                           segment_ids, option_mask, program_ids, program_mask, device=conf.device)
            # Padded rows are dropped implicitly: zip stops at the real ids.
            for this_logit, this_id in zip(logits.tolist(), x["unique_id"]):
                all_results.append(
                    RawResult(
                        unique_id=int(this_id),
                        logits=this_logit,
                        loss=None
                    ))
    output_prediction_file = os.path.join(ksave_dir_mode,
                                          "predictions.json")
    output_nbest_file = os.path.join(ksave_dir_mode,
                                     "nbest_predictions.json")
    output_eval_file = os.path.join(ksave_dir_mode, "evals.json")  # NOTE(review): path built but never written.
    all_predictions, all_nbest = compute_predictions(
        data_ori,
        data,
        all_results,
        n_best_size=conf.n_best_size,
        max_program_length=conf.max_program_length,
        tokenizer=tokenizer,
        op_list=op_list,
        op_list_size=len(op_list),
        const_list=const_list,
        const_list_size=len(const_list))
    write_predictions(all_predictions, output_prediction_file)
    write_predictions(all_nbest, output_nbest_file)
    return
def generate_test():
    """Build the generator model, load the saved checkpoint and decode the test split.

    Uses module-level `test_examples` / `test_features` prepared at import
    time, then (unless running in 'private' mode) scores the n-best output.
    """
    model = Bert_model(num_decoder_layers=conf.num_decoder_layers,
                       hidden_size=model_config.hidden_size,
                       dropout_rate=conf.dropout_rate,
                       program_length=conf.max_program_length,
                       input_length=conf.max_seq_length,
                       op_list=op_list,
                       const_list=const_list)
    # Checkpoints were saved from a DataParallel-wrapped model, so wrap
    # before load_state_dict to match the "module." key prefixes.
    model = nn.DataParallel(model)
    model.to(conf.device)
    model.load_state_dict(torch.load(conf.saved_model_path))
    model.eval()
    generate(test_examples, test_features, model, results_path, mode='test')
    if conf.mode != "private":
        res_file = results_path + "/test/nbest_predictions.json"
        error_file = results_path + "/test/full_results_error.json"
        all_res_file = results_path + "/test/full_results.json"
        evaluate_score(res_file, error_file, all_res_file)
def evaluate_score(file_in, error_file, all_res_file):
    """Score n-best predictions against the gold test file and log the result.

    Logs execution accuracy (numeric result matches) and program accuracy
    (exact program match) as computed by `evaluate_result`.
    """
    exe_acc, prog_acc = evaluate_result(
        file_in, conf.test_file, all_res_file, error_file, program_mode=conf.program_mode)
    prog_res = "exe acc: " + str(exe_acc) + " prog acc: " + str(prog_acc)
    write_log(log_file, prog_res)
# Script entry point: run inference on the configured test split.
if __name__ == '__main__':
    generate_test()
| 6,866 | 34.95288 | 126 | py |
FinQA | FinQA-main/code/generator/utils.py | import time
import os
import sys
import shutil
import io
import subprocess
import re
import zipfile
import json
import copy
import torch
import random
import collections
import math
import numpy as np
from tqdm import tqdm
import torch.nn.functional as F
from config import parameters as conf
from transformers import BertTokenizer, BertModel, BertConfig
import finqa_utils as finqa_utils
from sympy import simplify
# Progress bar
TOTAL_BAR_LENGTH = 100.   # character width of the drawn bar
last_time = time.time()   # timestamp of the previous progress_bar call
begin_time = last_time    # timestamp when the current bar started
# NOTE(review): 'stty size' needs an attached terminal; importing this module
# without a tty (pipe, cron, some IDEs) will fail here — verify deployment.
print(os.popen('stty size', 'r').read())
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
# Operations recognized by eval_program's program interpreter below.
all_ops = ["add", "subtract", "multiply", "divide", "exp", "greater", "table_max",
           "table_min", "table_sum", "table_average"]
def progress_bar(current, total, msg=None):
    """Draw a one-line console progress bar for step *current* of *total*.

    Uses the module-level `last_time` / `begin_time` to show per-step and
    cumulative timings, and `term_width` to pad/center the line. Redraws in
    place with '\r' until the final step, then emits a newline.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # Reset for new bar.
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    cur_time = time.time()
    step_time = cur_time - last_time
    last_time = cur_time
    tot_time = cur_time - begin_time
    L = []
    L.append(' Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad with spaces so leftovers from a longer previous line are erased.
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    if current < total-1:
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
def format_time(seconds):
    """Render a duration as at most two units, e.g. '1h1m', '1m5s', '500ms'.

    Units are days (D), hours (h), minutes (m), seconds (s), milliseconds
    (ms); only the two most significant non-zero components are kept.
    Returns '0ms' for a zero/negligible duration.
    """
    days = int(seconds / 3600 / 24)
    seconds -= days * 3600 * 24
    hours = int(seconds / 3600)
    seconds -= hours * 3600
    minutes = int(seconds / 60)
    seconds -= minutes * 60
    whole_seconds = int(seconds)
    millis = int((seconds - whole_seconds) * 1000)
    parts = []
    for value, suffix in ((days, 'D'), (hours, 'h'), (minutes, 'm'),
                          (whole_seconds, 's'), (millis, 'ms')):
        if value > 0:
            parts.append(str(value) + suffix)
    return ''.join(parts[:2]) if parts else '0ms'
def write_word(pred_list, save_dir, name):
    """Write one space-joined token sequence per line to save_dir + name.

    Fix: the original opened the file and never closed it, leaving the
    handle (and possibly buffered data) dangling; a context manager now
    guarantees flush + close even on error.
    """
    with open(save_dir + name, "w+") as out:
        for item in pred_list:
            out.write(" ".join(item) + '\n')
def get_current_git_version():
    """Return the current repository's HEAD commit sha (hex string).

    Imports the third-party `git` (GitPython) lazily so the module works
    without it unless this function is actually called; raises if not run
    inside a git checkout.
    """
    import git
    repo = git.Repo(search_parent_directories=True)
    sha = repo.head.object.hexsha
    return sha
def write_log(log_file, s):
    """Echo *s* to stdout and append it as one line to *log_file*."""
    print(s)
    with open(log_file, 'a') as handle:
        handle.write(s + '\n')
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def read_txt(input_path, log_file):
    """Read a txt file into a list of stripped lines, logging the path."""
    write_log(log_file, "Reading: %s" % input_path)
    with open(input_path) as input_file:
        return [line.strip() for line in input_file.readlines()]
def read_examples(input_path, tokenizer, op_list, const_list, log_file):
    """Read a json file into a list of examples.

    Parses each entry with finqa_utils.read_mathqa_entry. The op/const
    lists are returned unchanged (the dynamic-vocabulary extension below
    is intentionally commented out).
    """
    write_log(log_file, "Reading " + input_path)
    with open(input_path) as input_file:
        input_data = json.load(input_file)
    examples = []
    for entry in tqdm(input_data):
        examples.append(finqa_utils.read_mathqa_entry(entry, tokenizer))
        program = examples[-1].program
        # for tok in program:
        #     if 'const_' in tok and not (tok in const_list):
        #         const_list.append(tok)
        #     elif '(' in tok and not (tok in op_list):
        #         op_list.append(tok)
    return input_data, examples, op_list, const_list
def convert_examples_to_features(examples,
                                 tokenizer,
                                 max_seq_length,
                                 max_program_length,
                                 is_training,
                                 op_list,
                                 op_list_size,
                                 const_list,
                                 const_list_size,
                                 verbose=True):
    """Converts a list of DropExamples into InputFeatures.

    Each example may yield several features; every feature is tagged with
    a globally unique id (starting at 1000000000) and the index of its
    source example.
    """
    unique_id = 1000000000
    res = []
    for example_index, example in enumerate(examples):
        features = example.convert_single_example(
            is_training=is_training,
            tokenizer=tokenizer,
            max_seq_length=max_seq_length,
            max_program_length=max_program_length,
            op_list=op_list,
            op_list_size=op_list_size,
            const_list=const_list,
            const_list_size=const_list_size,
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token)
        for feature in features:
            feature.unique_id = unique_id
            feature.example_index = example_index
            res.append(feature)
            unique_id += 1
    return res
# Container for one decoded feature: its unique id, the per-step decoder
# logits, and (optionally) the loss associated with it.
RawResult = collections.namedtuple(
    "RawResult",
    "unique_id logits loss")
def compute_prog_from_logits(logits, max_program_length, example,
                             template=None):
    """Greedily decode a program from per-step token logits.

    Args:
        logits: sequence of per-step logit vectors, indexed [step][token].
        max_program_length: maximum number of decoding steps.
        example: the source example (unused here; kept for call compatibility).
        template: unused; kept for a backward-compatible signature.

    Returns:
        (pred_prog_ids, loss): the greedy token ids (token id 0 == EOF
        terminates decoding early) and the accumulated negative
        log-likelihood of the greedy choices.
    """
    # (Fix: removed the unused `op_stack` local left over from an earlier
    # implementation.)
    pred_prog_ids = []
    loss = 0
    for cur_step in range(max_program_length):
        cur_logits = logits[cur_step]
        cur_pred_softmax = _compute_softmax(cur_logits)
        cur_pred_token = np.argmax(cur_logits)
        loss -= np.log(cur_pred_softmax[cur_pred_token])
        pred_prog_ids.append(cur_pred_token)
        if cur_pred_token == 0:
            break
    return pred_prog_ids, loss
def compute_predictions(all_examples, all_features, all_results, n_best_size,
                        max_program_length, tokenizer, op_list, op_list_size,
                        const_list, const_list_size):
    """Computes final predictions based on logits.

    Groups features by source example, greedily decodes each feature's
    program from its logits, and builds (a) a summary dict mapping example
    index -> predicted/reference program and (b) an n-best dict with full
    per-example details.
    """
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", [
            "feature_index", "logits"
        ])
    all_predictions = collections.OrderedDict()
    all_predictions["pred_programs"] = collections.OrderedDict()
    all_predictions["ref_programs"] = collections.OrderedDict()
    all_nbest = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            logits = result.logits
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=feature_index,
                    logits=logits))
        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", "options answer program_ids program")
        nbest = []
        # Decode at most n_best_size candidate programs for this example.
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            program = example.program  # NOTE(review): unused — verify before removing.
            pred_prog_ids, loss = compute_prog_from_logits(pred.logits,
                                                           max_program_length,
                                                           example)
            # Map token ids back to program tokens (ops/consts/copied numbers).
            pred_prog = finqa_utils.indices_to_prog(pred_prog_ids,
                                                    example.numbers,
                                                    example.number_indices,
                                                    conf.max_seq_length,
                                                    op_list, op_list_size,
                                                    const_list, const_list_size
                                                    )
            nbest.append(
                _NbestPrediction(
                    options=example.options,
                    answer=example.answer,
                    program_ids=pred_prog_ids,
                    program=pred_prog))
        assert len(nbest) >= 1
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["id"] = example.id
            output["options"] = entry.options
            output["ref_answer"] = entry.answer
            output["pred_prog"] = [str(prog) for prog in entry.program]
            output["ref_prog"] = example.program
            output["question_tokens"] = example.question_tokens
            output["numbers"] = example.numbers
            output["number_indices"] = example.number_indices
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        # The first candidate is taken as THE prediction for this example.
        all_predictions["pred_programs"][example_index] = nbest_json[0]["pred_prog"]
        all_predictions["ref_programs"][example_index] = nbest_json[0]["ref_prog"]
        all_nbest[example_index] = nbest_json
    return all_predictions, all_nbest
def write_predictions(all_predictions, output_prediction_file):
    """Writes final predictions in json format (pretty-printed, newline-terminated)."""
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4))
        writer.write("\n")
class DataLoader:
    def __init__(self, is_training, data, reserved_token_size, batch_size=64, shuffle=True):
        """
        Main dataloader

        Iterates over InputFeatures in fixed-size batches. In training mode
        the last (short) batch is dropped; otherwise it is yielded as-is.
        `reserved_token_size` is stored for callers but not used internally.
        """
        self.data = data
        self.batch_size = batch_size
        self.is_training = is_training
        self.data_size = len(data)
        self.reserved_token_size = reserved_token_size
        # Ceiling division: one extra partial batch unless data divides evenly.
        self.num_batches = int(self.data_size / batch_size) if self.data_size % batch_size == 0 \
            else int(self.data_size / batch_size) + 1
        if shuffle:
            self.shuffle_all_data()
        self.count = 0

    def __iter__(self):
        # The loader itself is the iterator; call reset() to reuse it.
        return self

    def __next__(self):
        # drop last batch
        if self.is_training:
            bound = self.num_batches - 1
        else:
            bound = self.num_batches
        if self.count < bound:
            return self.get_batch()
        else:
            raise StopIteration

    def __len__(self):
        """Number of batches this loader will yield (before drop-last)."""
        return self.num_batches

    def reset(self):
        """Rewind to the first batch and reshuffle (for a new epoch)."""
        self.count = 0
        self.shuffle_all_data()

    def shuffle_all_data(self):
        # In-place shuffle of the underlying feature list.
        random.shuffle(self.data)
        return

    def get_batch(self):
        """Collect the next slice of features into a dict of parallel lists."""
        start_index = self.count * self.batch_size
        end_index = min((self.count + 1) * self.batch_size, self.data_size)
        self.count += 1
        # print (self.count)
        batch_data = {"unique_id": [],
                      "example_index": [],
                      "tokens": [],
                      "question": [],
                      "input_ids": [],
                      "input_mask": [],
                      "option_mask": [],
                      "segment_ids": [],
                      "options": [],
                      "answer": [],
                      "program": [],
                      "program_ids": [],
                      "program_weight": [],
                      "program_mask": []}
        for each_data in self.data[start_index: end_index]:
            batch_data["option_mask"].append(each_data.option_mask)
            batch_data["input_mask"].append(each_data.input_mask)
            batch_data["unique_id"].append(each_data.unique_id)
            batch_data["example_index"].append(each_data.example_index)
            batch_data["tokens"].append(each_data.tokens)
            batch_data["question"].append(each_data.question)
            batch_data["input_ids"].append(each_data.input_ids)
            batch_data["segment_ids"].append(each_data.segment_ids)
            batch_data["options"].append(each_data.options)
            batch_data["answer"].append(each_data.answer)
            batch_data["program"].append(each_data.program)
            batch_data["program_ids"].append(each_data.program_ids)
            batch_data["program_weight"].append(each_data.program_weight)
            batch_data["program_mask"].append(each_data.program_mask)
        return batch_data
def cleanhtml(raw_html):
    """Strip HTML tags (non-greedy <...> spans) from a string."""
    return re.sub('<.*?>', '', raw_html)
def str_to_num(text):
    """Parse a financial-text number into a float.

    Handles thousands separators ("1,234"), percentages ("12%" -> 0.12)
    and const_* program tokens ("const_m1" -> -1.0). Returns the string
    "n/a" when the text is not a recognized number.
    """
    text = text.replace(",", "")
    try:
        return float(text)
    except ValueError:
        pass
    if "%" in text:
        try:
            return float(text.replace("%", "")) / 100.0
        except ValueError:
            return "n/a"
    if "const" in text:
        stripped = text.replace("const_", "")
        if stripped == "m1":
            stripped = "-1"
        return float(stripped)
    return "n/a"
def process_row(row_in):
    """Convert a table row of raw cell strings into a list of floats.

    Strips '$' signs and any trailing parenthesised annotation from each
    cell; returns the string "n/a" if any cell fails to parse.
    """
    row_out = []
    for cell in row_in:
        cleaned = cell.replace("$", "").strip()
        cleaned = cleaned.split("(")[0].strip()
        value = str_to_num(cleaned)
        if value == "n/a":
            return "n/a"
        row_out.append(value)
    return row_out
def reprog_to_seq(prog_in, is_gold):
    '''
    predicted recursive program to list program
    ["divide(", "72", "multiply(", "6", "210", ")", ")"]
    ["multiply(", "6", "210", ")", "divide(", "72", "#0", ")"]
    '''
    stack = []
    res = []
    try:
        step_num = 0
        for tok in prog_in:
            if tok == ")":
                # Close the innermost call: pop arg2, arg1, op, emit them in
                # call order, then push a reference (#k) to this step's result.
                arg2 = stack.pop()
                arg1 = stack.pop()
                op = stack.pop()
                res.extend([op, arg1, arg2, ")"])
                stack.append("#" + str(step_num))
                step_num += 1
            else:
                stack.append(tok)
    except:
        # Malformed programs are tolerated for model predictions (return the
        # steps recovered so far) but are a hard error for gold programs.
        if is_gold:
            raise ValueError
    return res
def eval_program(program, table):
    '''
    calculate the numerical results of the program
    '''
    # Returns (invalid_flag, result). invalid_flag is 1 when the program is
    # structurally malformed or any argument fails to resolve; result is a
    # number rounded to 5 digits, "yes"/"no" (for greater), or "n/a".
    # NOTE(review): relies on the module-level `all_ops` list of operator
    # names, defined elsewhere in this module.
    invalid_flag = 0
    this_res = "n/a"
    try:
        program = program[:-1] # remove EOF
        # check structure
        # a well-formed flat program is a sequence of 4-token steps: op( arg1 arg2 )
        for ind, token in enumerate(program):
            if ind % 4 == 0:
                if token.strip("(") not in all_ops:
                    return 1, "n/a"
            if (ind + 1) % 4 == 0:
                if token != ")":
                    return 1, "n/a"
        # join on "|" so that arguments survive the re-splits below intact
        program = "|".join(program)
        steps = program.split(")")[:-1]
        # res_dict maps step index -> that step's result, for "#k" references
        res_dict = {}
        for ind, step in enumerate(steps):
            step = step.strip()
            if len(step.split("(")) > 2:
                invalid_flag = 1
                break
            op = step.split("(")[0].strip("|").strip()
            args = step.split("(")[1].strip("|").strip()
            arg1 = args.split("|")[0].strip()
            arg2 = args.split("|")[1].strip()
            if op == "add" or op == "subtract" or op == "multiply" or op == "divide" or op == "exp" or op == "greater":
                # binary numeric ops; "#k" refers to the result of step k
                if "#" in arg1:
                    arg1 = res_dict[int(arg1.replace("#", ""))]
                else:
                    arg1 = str_to_num(arg1)
                    if arg1 == "n/a":
                        invalid_flag = 1
                        break
                if "#" in arg2:
                    arg2 = res_dict[int(arg2.replace("#", ""))]
                else:
                    arg2 = str_to_num(arg2)
                    if arg2 == "n/a":
                        invalid_flag = 1
                        break
                if op == "add":
                    this_res = arg1 + arg2
                elif op == "subtract":
                    this_res = arg1 - arg2
                elif op == "multiply":
                    this_res = arg1 * arg2
                elif op == "divide":
                    this_res = arg1 / arg2
                elif op == "exp":
                    this_res = arg1 ** arg2
                elif op == "greater":
                    this_res = "yes" if arg1 > arg2 else "no"
                res_dict[ind] = this_res
            elif "table" in op:
                # table aggregation ops: arg1 names a row header; the row's
                # remaining cells are parsed to numbers and aggregated
                table_dict = {}
                for row in table:
                    table_dict[row[0]] = row[1:]
                # NOTE(review): when arg1 is a "#k" reference, cal_row/num_row
                # are never assigned below, so the num_row check raises
                # NameError and falls into the except -> invalid. Confirm
                # whether "#" args were ever intended for table ops.
                if "#" in arg1:
                    arg1 = res_dict[int(arg1.replace("#", ""))]
                else:
                    if arg1 not in table_dict:
                        invalid_flag = 1
                        break
                    cal_row = table_dict[arg1]
                    num_row = process_row(cal_row)
                if num_row == "n/a":
                    invalid_flag = 1
                    break
                if op == "table_max":
                    this_res = max(num_row)
                elif op == "table_min":
                    this_res = min(num_row)
                elif op == "table_sum":
                    this_res = sum(num_row)
                elif op == "table_average":
                    this_res = sum(num_row) / len(num_row)
                res_dict[ind] = this_res
        # round final numeric results for comparison against gold answers
        if this_res != "yes" and this_res != "no" and this_res != "n/a":
            this_res = round(this_res, 5)
    except:
        # any unexpected failure (bad reference, division by zero, ...)
        # is treated as an invalid program
        invalid_flag = 1
    return invalid_flag, this_res
def equal_program(program1, program2):
    '''
    symbolic program if equal
    program1: gold
    program2: pred
    '''
    # Each distinct leaf argument (or whole table step) in the gold program is
    # mapped to a fresh symbol "a<i>"; both programs are then unrolled into
    # nested infix expressions over those symbols and compared with sympy's
    # simplify, so algebraically-equivalent rewrites of the gold program count
    # as correct.
    # NOTE(review): depends on the module-level `all_ops` list and on sympy's
    # `simplify` being imported elsewhere in this module.
    sym_map = {}
    program1 = program1[:-1] # remove EOF
    program1 = "|".join(program1)
    steps = program1.split(")")[:-1]
    invalid_flag = 0
    sym_ind = 0
    step_dict_1 = {}
    # symbolic map: assign a symbol to every leaf of the gold program
    for ind, step in enumerate(steps):
        step = step.strip()
        assert len(step.split("(")) <= 2
        op = step.split("(")[0].strip("|").strip()
        args = step.split("(")[1].strip("|").strip()
        arg1 = args.split("|")[0].strip()
        arg2 = args.split("|")[1].strip()
        step_dict_1[ind] = step
        if "table" in op:
            # a whole table step is treated as one opaque variable
            if step not in sym_map:
                sym_map[step] = "a" + str(sym_ind)
                sym_ind += 1
        else:
            if "#" not in arg1:
                if arg1 not in sym_map:
                    sym_map[arg1] = "a" + str(sym_ind)
                    sym_ind += 1
            if "#" not in arg2:
                if arg2 not in sym_map:
                    sym_map[arg2] = "a" + str(sym_ind)
                    sym_ind += 1
    # check program 2: structure must be valid and every leaf must already
    # appear in the gold symbol map
    step_dict_2 = {}
    try:
        program2 = program2[:-1] # remove EOF
        # check structure
        for ind, token in enumerate(program2):
            if ind % 4 == 0:
                if token.strip("(") not in all_ops:
                    print("structure error")
                    return False
            if (ind + 1) % 4 == 0:
                if token != ")":
                    print("structure error")
                    return False
        program2 = "|".join(program2)
        steps = program2.split(")")[:-1]
        for ind, step in enumerate(steps):
            step = step.strip()
            if len(step.split("(")) > 2:
                return False
            op = step.split("(")[0].strip("|").strip()
            args = step.split("(")[1].strip("|").strip()
            arg1 = args.split("|")[0].strip()
            arg2 = args.split("|")[1].strip()
            step_dict_2[ind] = step
            if "table" in op:
                # a predicted table step must appear verbatim in the gold map
                if step not in sym_map:
                    return False
            else:
                if "#" not in arg1:
                    if arg1 not in sym_map:
                        return False
                else:
                    # "#k" references may only point to earlier steps
                    if int(arg1.strip("#")) >= ind:
                        return False
                if "#" not in arg2:
                    if arg2 not in sym_map:
                        return False
                else:
                    if int(arg2.strip("#")) >= ind:
                        return False
    except:
        return False
    def symbol_recur(step, step_dict):
        # Recursively expand one step (and the earlier steps it references)
        # into a fully parenthesized infix expression over sym_map symbols.
        step = step.strip()
        op = step.split("(")[0].strip("|").strip()
        args = step.split("(")[1].strip("|").strip()
        arg1 = args.split("|")[0].strip()
        arg2 = args.split("|")[1].strip()
        if "table" in op:
            # as var
            return sym_map[step]
        if "#" in arg1:
            arg1_ind = int(arg1.replace("#", ""))
            arg1_part = symbol_recur(step_dict[arg1_ind], step_dict)
        else:
            arg1_part = sym_map[arg1]
        if "#" in arg2:
            arg2_ind = int(arg2.replace("#", ""))
            arg2_part = symbol_recur(step_dict[arg2_ind], step_dict)
        else:
            arg2_part = sym_map[arg2]
        if op == "add":
            return "( " + arg1_part + " + " + arg2_part + " )"
        elif op == "subtract":
            return "( " + arg1_part + " - " + arg2_part + " )"
        elif op == "multiply":
            return "( " + arg1_part + " * " + arg2_part + " )"
        elif op == "divide":
            return "( " + arg1_part + " / " + arg2_part + " )"
        elif op == "exp":
            return "( " + arg1_part + " ** " + arg2_part + " )"
        elif op == "greater":
            return "( " + arg1_part + " > " + arg2_part + " )"
    # derive symbolic program 1 (expand from the final step)
    steps = program1.split(")")[:-1]
    sym_prog1 = symbol_recur(steps[-1], step_dict_1)
    sym_prog1 = simplify(sym_prog1, evaluate=False)
    try:
        # derive symbolic program 2
        steps = program2.split(")")[:-1]
        sym_prog2 = symbol_recur(steps[-1], step_dict_2)
        sym_prog2 = simplify(sym_prog2, evaluate=False)
    except:
        return False
    return sym_prog1 == sym_prog2
def evaluate_result(json_in, json_ori, all_res_file, error_file, program_mode):
    '''
    execution acc
    program acc
    '''
    # json_in: n-best prediction file; json_ori: original dataset file.
    # Writes every example (with predictions attached) to all_res_file and the
    # execution mismatches to error_file; returns (exe accuracy, prog accuracy).
    # Fix: removed the dead `correct` counter that was never incremented and
    # always printed "Correct: 0", which was misleading in the logs.
    with open(json_in) as f_in:
        data = json.load(f_in)
    with open(json_ori) as f_in:
        data_ori = json.load(f_in)
    # index the original data by example id (ids must be unique)
    data_dict = {}
    for each_data in data_ori:
        assert each_data["id"] not in data_dict
        data_dict[each_data["id"]] = each_data
    exe_correct = 0
    prog_correct = 0
    res_list = []
    all_res_list = []
    for tmp in data:
        # take the top-1 prediction for each example
        each_data = data[tmp][0]
        each_id = each_data["id"]
        each_ori_data = data_dict[each_id]
        table = each_ori_data["table"]
        gold_res = each_ori_data["qa"]["exe_ans"]
        pred = each_data["pred_prog"]
        gold = each_data["ref_prog"]
        if program_mode == "nest":
            # nested programs are first flattened to the 4-token step format
            if pred[-1] == "EOF":
                pred = pred[:-1]
            pred = reprog_to_seq(pred, is_gold=False)
            pred += ["EOF"]
            gold = gold[:-1]
            gold = reprog_to_seq(gold, is_gold=True)
            gold += ["EOF"]
        invalid_flag, exe_res = eval_program(pred, table)
        if invalid_flag == 0:
            if exe_res == gold_res:
                exe_correct += 1
        if equal_program(gold, pred):
            # sanity check: a symbolically-equal program must also execute to
            # the gold answer; dump the offending example before asserting
            if exe_res != gold_res:
                print(each_id)
                print(gold)
                print(pred)
                print(gold_res)
                print(exe_res)
                print(each_ori_data["id"])
            assert exe_res == gold_res
            prog_correct += 1
            # log programs that are equivalent but not token-identical
            if "".join(gold) != "".join(pred):
                print(each_id)
                print(gold)
                print(pred)
                print(gold_res)
                print(exe_res)
                print(each_ori_data["id"])
        each_ori_data["qa"]["predicted"] = pred
        if exe_res != gold_res:
            res_list.append(each_ori_data)
        all_res_list.append(each_ori_data)
    exe_acc = float(exe_correct) / len(data)
    prog_acc = float(prog_correct) / len(data)
    print("All: ", len(data))
    print("Exe acc: ", exe_acc)
    print("Prog acc: ", prog_acc)
    with open(error_file, "w") as f:
        json.dump(res_list, f, indent=4)
    with open(all_res_file, "w") as f:
        json.dump(all_res_list, f, indent=4)
    return exe_acc, prog_acc
if __name__ == '__main__':
root = "your_root_path"
our_data = root + "dataset/"
| 25,374 | 28.747948 | 119 | py |
FinQA | FinQA-main/code/generator/Main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main script
"""
from tqdm import tqdm
import json
import os
from datetime import datetime
import time
import logging
from utils import *
from config import parameters as conf
from torch import nn
import torch
import torch.optim as optim
from Model_new import Bert_model
# Select tokenizer/config backend according to the configured pretrained model.
if conf.pretrained_model == "bert":
    print("Using bert")
    from transformers import BertTokenizer
    from transformers import BertConfig
    tokenizer = BertTokenizer.from_pretrained(conf.model_size)
    model_config = BertConfig.from_pretrained(conf.model_size)
elif conf.pretrained_model == "roberta":
    print("Using roberta")
    from transformers import RobertaTokenizer
    from transformers import RobertaConfig
    tokenizer = RobertaTokenizer.from_pretrained(conf.model_size)
    model_config = RobertaConfig.from_pretrained(conf.model_size)
elif conf.pretrained_model == "finbert":
    print("Using finbert")
    from transformers import BertTokenizer
    from transformers import BertConfig
    # finbert reuses the standard BERT tokenizer; only the weights differ
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model_config = BertConfig.from_pretrained(conf.model_size)
elif conf.pretrained_model == "longformer":
    print("Using longformer")
    from transformers import LongformerTokenizer, LongformerConfig
    tokenizer = LongformerTokenizer.from_pretrained(conf.model_size)
    model_config = LongformerConfig.from_pretrained(conf.model_size)
# create output paths
if conf.mode == "train":
    # fresh timestamped directory per training run (fails if it exists)
    model_dir_name = conf.model_save_name + "_" + \
        datetime.now().strftime("%Y%m%d%H%M%S")
    model_dir = os.path.join(conf.output_path, model_dir_name)
    results_path = os.path.join(model_dir, "results")
    saved_model_path = os.path.join(model_dir, "saved_model")
    os.makedirs(saved_model_path, exist_ok=False)
    os.makedirs(results_path, exist_ok=False)
    log_file = os.path.join(results_path, 'log.txt')
else:
    # inference: reuse a saved model, write into a new inference_only_* dir
    saved_model_path = os.path.join(conf.output_path, conf.saved_model_path)
    model_dir_name = datetime.now().strftime("%Y%m%d%H%M%S")
    model_dir = os.path.join(
        conf.output_path, 'inference_only_' + model_dir_name)
    results_path = os.path.join(model_dir, "results")
    os.makedirs(results_path, exist_ok=False)
    log_file = os.path.join(results_path, 'log.txt')
# Decoder vocabulary: special tokens + operators (rendered "op(") + constants.
op_list = read_txt(conf.op_list_file, log_file)
op_list = [op + '(' for op in op_list]
op_list = ['EOF', 'UNK', 'GO', ')'] + op_list
const_list = read_txt(conf.const_list_file, log_file)
const_list = [const.lower().replace('.', '_') for const in const_list]
reserved_token_size = len(op_list) + len(const_list)
print(op_list)
print(const_list)
# Load and tokenize all three dataset splits.
train_data, train_examples, op_list, const_list = \
    read_examples(input_path=conf.train_file, tokenizer=tokenizer,
                  op_list=op_list, const_list=const_list, log_file=log_file)
valid_data, valid_examples, op_list, const_list = \
    read_examples(input_path=conf.valid_file, tokenizer=tokenizer,
                  op_list=op_list, const_list=const_list, log_file=log_file)
test_data, test_examples, op_list, const_list = \
    read_examples(input_path=conf.test_file, tokenizer=tokenizer,
                  op_list=op_list, const_list=const_list, log_file=log_file)
# Convert each split to model-ready features; the kwargs dict is reused with
# only `examples` / `is_training` swapped out between splits.
kwargs = {"examples": train_examples,
          "tokenizer": tokenizer,
          "max_seq_length": conf.max_seq_length,
          "max_program_length": conf.max_program_length,
          "is_training": True,
          "op_list": op_list,
          "op_list_size": len(op_list),
          "const_list": const_list,
          "const_list_size": len(const_list),
          "verbose": True}
train_features = convert_examples_to_features(**kwargs)
kwargs["examples"] = valid_examples
kwargs["is_training"] = False
valid_features = convert_examples_to_features(**kwargs)
kwargs["examples"] = test_examples
test_features = convert_examples_to_features(**kwargs)
def train():
    """Train the program generator: build the model, run the batch loop, and
    periodically checkpoint and evaluate on the validation split."""
    # keep track of all input parameters
    write_log(log_file, "####################INPUT PARAMETERS###################")
    for attr in conf.__dict__:
        value = conf.__dict__[attr]
        write_log(log_file, attr + " = " + str(value))
    write_log(log_file, "#######################################################")
    model = Bert_model(num_decoder_layers=conf.num_decoder_layers,
                       hidden_size=model_config.hidden_size,
                       dropout_rate=conf.dropout_rate,
                       program_length=conf.max_program_length,
                       input_length=conf.max_seq_length,
                       op_list=op_list,
                       const_list=const_list)
    model = nn.DataParallel(model)
    model.to(conf.device)
    optimizer = optim.Adam(model.parameters(), conf.learning_rate)
    # per-position loss; ignore_index=-1 skips padded program positions
    criterion = nn.CrossEntropyLoss(reduction='none', ignore_index=-1)
    model.train()
    # torch.autograd.set_detect_anomaly(True)
    train_iterator = DataLoader(
        is_training=True, data=train_features, batch_size=conf.batch_size, reserved_token_size=reserved_token_size, shuffle=True)
    # k counts optimizer steps; record_* accumulate loss between log reports
    k = 0
    record_k = 0
    record_loss_k = 0
    loss, start_time = 0.0, time.time()
    record_loss = 0.0
    for _ in range(conf.epoch):
        train_iterator.reset()
        for x in train_iterator:
            input_ids = torch.tensor(x['input_ids']).to(conf.device)
            input_mask = torch.tensor(x['input_mask']).to(conf.device)
            segment_ids = torch.tensor(x['segment_ids']).to(conf.device)
            program_ids = torch.tensor(x['program_ids']).to(conf.device)
            program_mask = torch.tensor(x['program_mask']).to(conf.device)
            option_mask = torch.tensor(x['option_mask']).to(conf.device)
            model.zero_grad()
            optimizer.zero_grad()
            # first positional arg True -> teacher forcing with gold programs
            this_logits = model(True, input_ids, input_mask, segment_ids,
                                option_mask, program_ids, program_mask, device=conf.device)
            this_loss = criterion(
                this_logits.view(-1, this_logits.shape[-1]), program_ids.view(-1))
            # mask out padded program positions before averaging
            this_loss = this_loss * program_mask.view(-1)
            # per token loss
            this_loss = this_loss.sum() / program_mask.sum()
            record_loss += this_loss.item()
            record_k += 1
            k += 1
            this_loss.backward()
            optimizer.step()
            if k > 1 and k % conf.report_loss == 0:
                write_log(log_file, "%d : loss = %.3f" %
                          (k, record_loss / record_k))
                record_loss = 0.0
                record_k = 0
            if k > 1 and k % conf.report == 0:
                print("Round: ", k / conf.report)
                model.eval()
                cost_time = time.time() - start_time
                write_log(log_file, "%d : time = %.3f " %
                          (k // conf.report, cost_time))
                start_time = time.time()
                if k // conf.report >= 1:
                    print("Val test")
                    # save model
                    saved_model_path_cnt = os.path.join(
                        saved_model_path, 'loads', str(k // conf.report))
                    os.makedirs(saved_model_path_cnt, exist_ok=True)
                    torch.save(model.state_dict(),
                               saved_model_path_cnt + "/model.pt")
                    results_path_cnt = os.path.join(
                        results_path, 'loads', str(k // conf.report))
                    os.makedirs(results_path_cnt, exist_ok=True)
                    # NOTE(review): evaluate() returns None; the assignment and
                    # the commented write_log below look like leftovers.
                    validation_result = evaluate(
                        valid_examples, valid_features, model, results_path_cnt, 'valid')
                    # write_log(log_file, validation_result)
                model.train()
def evaluate(data_ori, data, model, ksave_dir, mode='valid'):
    """Run constrained decoding over one split, write prediction files under
    ksave_dir/<mode>, and log execution / program accuracy."""
    pred_list = []
    pred_unk = []
    ksave_dir_mode = os.path.join(ksave_dir, mode)
    os.makedirs(ksave_dir_mode, exist_ok=True)
    data_iterator = DataLoader(
        is_training=False, data=data, batch_size=conf.batch_size_test, reserved_token_size=reserved_token_size, shuffle=False)
    k = 0
    all_results = []
    with torch.no_grad():
        for x in tqdm(data_iterator):
            input_ids = x['input_ids']
            input_mask = x['input_mask']
            segment_ids = x['segment_ids']
            program_ids = x['program_ids']
            program_mask = x['program_mask']
            option_mask = x['option_mask']
            # pad the final partial batch with zero rows up to batch_size_test
            ori_len = len(input_ids)
            for each_item in [input_ids, input_mask, segment_ids, program_ids, program_mask, option_mask]:
                if ori_len < conf.batch_size_test:
                    each_len = len(each_item[0])
                    pad_x = [0] * each_len
                    each_item += [pad_x] * (conf.batch_size_test - ori_len)
            input_ids = torch.tensor(input_ids).to(conf.device)
            input_mask = torch.tensor(input_mask).to(conf.device)
            segment_ids = torch.tensor(segment_ids).to(conf.device)
            program_ids = torch.tensor(program_ids).to(conf.device)
            program_mask = torch.tensor(program_mask).to(conf.device)
            option_mask = torch.tensor(option_mask).to(conf.device)
            # first positional arg False -> greedy constrained decoding
            logits = model(False, input_ids, input_mask,
                           segment_ids, option_mask, program_ids, program_mask, device=conf.device)
            # zip with unique_id silently drops the zero-padded rows
            for this_logit, this_id in zip(logits.tolist(), x["unique_id"]):
                all_results.append(
                    RawResult(
                        unique_id=int(this_id),
                        logits=this_logit,
                        loss=None
                    ))
    output_prediction_file = os.path.join(ksave_dir_mode,
                                          "predictions.json")
    output_nbest_file = os.path.join(ksave_dir_mode,
                                     "nbest_predictions.json")
    output_eval_file = os.path.join(ksave_dir_mode, "full_results.json")
    output_error_file = os.path.join(ksave_dir_mode, "full_results_error.json")
    all_predictions, all_nbest = compute_predictions(
        data_ori,
        data,
        all_results,
        n_best_size=conf.n_best_size,
        max_program_length=conf.max_program_length,
        tokenizer=tokenizer,
        op_list=op_list,
        op_list_size=len(op_list),
        const_list=const_list,
        const_list_size=len(const_list))
    write_predictions(all_predictions, output_prediction_file)
    write_predictions(all_nbest, output_nbest_file)
    # score against the matching original split file
    if mode == "valid":
        original_file = conf.valid_file
    else:
        original_file = conf.test_file
    exe_acc, prog_acc = evaluate_result(
        output_nbest_file, original_file, output_eval_file, output_error_file, program_mode=conf.program_mode)
    prog_res = "exe acc: " + str(exe_acc) + " prog acc: " + str(prog_acc)
    write_log(log_file, prog_res)
    return
# Entry point: only the training mode is driven from this script directly.
if __name__ == '__main__':
    if conf.mode == "train":
        train()
| 11,036 | 36.927835 | 129 | py |
FinQA | FinQA-main/code/generator/Model_new.py | import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
import math
import numpy as np
from config import parameters as conf
if conf.pretrained_model == "bert":
from transformers import BertModel
elif conf.pretrained_model == "roberta":
from transformers import RobertaModel
elif conf.pretrained_model == "finbert":
from transformers import BertModel
elif conf.pretrained_model == "longformer":
from transformers import LongformerModel
class Bert_model(nn.Module):
    """Encoder-decoder program generator.

    A pretrained transformer encodes the question+context; an LSTM decoder
    with attention emits one program token per step, choosing among the
    reserved tokens (operators + constants) and the input sequence positions
    (copy mechanism). At test time decoding is constrained to the 4-token
    step grammar op( arg1 arg2 ).
    """
    def __init__(self, num_decoder_layers, hidden_size, dropout_rate, input_length,
                 program_length, op_list, const_list):
        super(Bert_model, self).__init__()
        self.op_list_size = len(op_list)
        self.const_list_size = len(const_list)
        # option vocabulary = operators + constants (input positions appended
        # at run time)
        self.reserved_token_size = self.op_list_size + self.const_list_size
        self.program_length = program_length
        self.hidden_size = hidden_size
        self.const_list = const_list
        self.op_list = op_list
        self.input_length = input_length
        # index tensors registered as (frozen) parameters so they move with
        # the module across devices
        self.reserved_ind = nn.Parameter(torch.arange(
            0, self.reserved_token_size), requires_grad=False)
        self.reserved_go = nn.Parameter(torch.arange(op_list.index(
            'GO'), op_list.index('GO') + 1), requires_grad=False)
        self.reserved_para = nn.Parameter(torch.arange(op_list.index(
            ')'), op_list.index(')') + 1), requires_grad=False)
        # masking for decoidng for test time
        op_ones = nn.Parameter(torch.ones(
            self.op_list_size), requires_grad=False)
        op_zeros = nn.Parameter(torch.zeros(
            self.op_list_size), requires_grad=False)
        other_ones = nn.Parameter(torch.ones(
            input_length + self.const_list_size), requires_grad=False)
        other_zeros = nn.Parameter(torch.zeros(
            input_length + self.const_list_size), requires_grad=False)
        # op_only_mask is 1 on operator slots; seq_only_mask is 1 on
        # constant/input slots -- used to forbid the wrong kind per step
        self.op_only_mask = nn.Parameter(
            torch.cat((op_ones, other_zeros), 0), requires_grad=False)
        self.seq_only_mask = nn.Parameter(
            torch.cat((op_zeros, other_ones), 0), requires_grad=False)
        # for ")"
        para_before_ones = nn.Parameter(torch.ones(
            op_list.index(')')), requires_grad=False)
        para_after_ones = nn.Parameter(torch.ones(
            input_length + self.reserved_token_size - op_list.index(')') - 1), requires_grad=False)
        para_zero = nn.Parameter(torch.zeros(1), requires_grad=False)
        # para_mask is 1 everywhere except the ")" slot: subtracting it at a
        # ")" round forces the closing parenthesis
        self.para_mask = nn.Parameter(torch.cat(
            (para_before_ones, para_zero, para_after_ones), 0), requires_grad=False)
        # for step embedding
        # self.step_masks = []
        # step_masks[i] selects the "#i" slot in the option vocabulary, used
        # to swap in the learned step-result embedding after each full step
        all_tmp_list = self.op_list + self.const_list
        self.step_masks = nn.Parameter(torch.zeros(
            conf.max_step_ind, input_length + self.reserved_token_size), requires_grad=False)
        for i in range(conf.max_step_ind):
            this_step_mask_ind = all_tmp_list.index("#" + str(i))
            self.step_masks[i, this_step_mask_ind] = 1.0
        # self.step_mask_eye = torch.eye(conf.max_step_ind)
        if conf.pretrained_model == "bert":
            self.bert = BertModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        elif conf.pretrained_model == "roberta":
            self.bert = RobertaModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        elif conf.pretrained_model == "finbert":
            self.bert = BertModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        elif conf.pretrained_model == "longformer":
            self.bert = LongformerModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        self.cls_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.cls_dropout = nn.Dropout(dropout_rate)
        self.seq_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.seq_dropout = nn.Dropout(dropout_rate)
        self.reserved_token_embedding = nn.Embedding(
            self.reserved_token_size, hidden_size)
        # attentions
        self.decoder_history_attn_prj = nn.Linear(
            hidden_size, hidden_size, bias=True)
        self.decoder_history_attn_dropout = nn.Dropout(dropout_rate)
        self.question_attn_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.question_attn_dropout = nn.Dropout(dropout_rate)
        self.question_summary_attn_prj = nn.Linear(
            hidden_size, hidden_size, bias=True)
        self.question_summary_attn_dropout = nn.Dropout(dropout_rate)
        if conf.sep_attention:
            self.input_embeddings_prj = nn.Linear(
                hidden_size*3, hidden_size, bias=True)
        else:
            self.input_embeddings_prj = nn.Linear(
                hidden_size*2, hidden_size, bias=True)
        self.input_embeddings_layernorm = nn.LayerNorm([1, hidden_size])
        self.option_embeddings_prj = nn.Linear(
            hidden_size*2, hidden_size, bias=True)
        # decoder lstm
        self.rnn = torch.nn.LSTM(input_size=hidden_size, hidden_size=hidden_size,
                                 num_layers=conf.num_decoder_layers, batch_first=True)
        # step vector
        self.decoder_step_proj = nn.Linear(
            3*hidden_size, hidden_size, bias=True)
        self.decoder_step_proj_dropout = nn.Dropout(dropout_rate)
        self.step_mix_proj = nn.Linear(
            hidden_size*2, hidden_size, bias=True)
    def forward(self, is_training, input_ids, input_mask, segment_ids, option_mask, program_ids, program_mask, device):
        """Return per-step logits over the option vocabulary.

        is_training=True -> teacher forcing with program_ids; False -> greedy
        decoding constrained by the op/number/")" round masks.
        """
        bert_outputs = self.bert(
            input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)
        bert_sequence_output = bert_outputs.last_hidden_state
        bert_pooled_output = bert_sequence_output[:, 0, :]
        batch_size, seq_length, bert_dim = list(bert_sequence_output.size())
        # one gold token per decoding step (used only when teacher forcing)
        split_program_ids = torch.split(program_ids, 1, dim=1)
        # print(self.program_length)
        # print(program_ids.size())
        # print(split_program_ids[0].size())
        pooled_output = self.cls_prj(bert_pooled_output)
        pooled_output = self.cls_dropout(pooled_output)
        option_size = self.reserved_token_size + seq_length
        sequence_output = self.seq_prj(bert_sequence_output)
        sequence_output = self.seq_dropout(sequence_output)
        op_embeddings = self.reserved_token_embedding(self.reserved_ind)
        op_embeddings = op_embeddings.repeat(batch_size, 1, 1)
        logits = []
        # decoding starts from the embedded GO token
        init_decoder_output = self.reserved_token_embedding(self.reserved_go)
        decoder_output = init_decoder_output.repeat(batch_size, 1, 1)
        # [batch, op + seq len, hidden]
        initial_option_embeddings = torch.cat(
            [op_embeddings, sequence_output], dim=1)
        if conf.sep_attention:
            decoder_history = decoder_output
        else:
            decoder_history = torch.unsqueeze(pooled_output, dim=-1)
        decoder_state_h = torch.zeros(
            1, batch_size, self.hidden_size, device=device)
        decoder_state_c = torch.zeros(
            1, batch_size, self.hidden_size, device=device)
        float_input_mask = input_mask.float()
        float_input_mask = torch.unsqueeze(float_input_mask, dim=-1)
        this_step_new_op_emb = initial_option_embeddings
        for cur_step in range(self.program_length):
            # decoder history att
            decoder_history_attn_vec = self.decoder_history_attn_prj(
                decoder_output)
            decoder_history_attn_vec = self.decoder_history_attn_dropout(
                decoder_history_attn_vec)
            decoder_history_attn_w = torch.matmul(
                decoder_history, torch.transpose(decoder_history_attn_vec, 1, 2))
            decoder_history_attn_w = F.softmax(decoder_history_attn_w, dim=1)
            decoder_history_ctx_embeddings = torch.matmul(
                torch.transpose(decoder_history_attn_w, 1, 2), decoder_history)
            if conf.sep_attention:
                # input seq att
                question_attn_vec = self.question_attn_prj(decoder_output)
                question_attn_vec = self.question_attn_dropout(
                    question_attn_vec)
                question_attn_w = torch.matmul(
                    sequence_output, torch.transpose(question_attn_vec, 1, 2))
                # padded positions get a large negative score before softmax
                question_attn_w -= 1e6 * (1 - float_input_mask)
                question_attn_w = F.softmax(question_attn_w, dim=1)
                question_ctx_embeddings = torch.matmul(
                    torch.transpose(question_attn_w, 1, 2), sequence_output)
            # another input seq att
            question_summary_vec = self.question_summary_attn_prj(
                decoder_output)
            question_summary_vec = self.question_summary_attn_dropout(
                question_summary_vec)
            question_summary_w = torch.matmul(
                sequence_output, torch.transpose(question_summary_vec, 1, 2))
            question_summary_w -= 1e6 * (1 - float_input_mask)
            question_summary_w = F.softmax(question_summary_w, dim=1)
            question_summary_embeddings = torch.matmul(
                torch.transpose(question_summary_w, 1, 2), sequence_output)
            if conf.sep_attention:
                concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,
                                                     question_ctx_embeddings,
                                                     decoder_output], dim=-1)
            else:
                concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,
                                                     decoder_output], dim=-1)
            input_embeddings = self.input_embeddings_prj(
                concat_input_embeddings)
            if conf.layer_norm:
                input_embeddings = self.input_embeddings_layernorm(
                    input_embeddings)
            # bilinear-style scoring of every option against the decoder state
            question_option_vec = this_step_new_op_emb * question_summary_embeddings
            option_embeddings = torch.cat(
                [this_step_new_op_emb, question_option_vec], dim=-1)
            option_embeddings = self.option_embeddings_prj(option_embeddings)
            option_logits = torch.matmul(
                option_embeddings, torch.transpose(input_embeddings, 1, 2))
            option_logits = torch.squeeze(
                option_logits, dim=2)  # [batch, op + seq_len]
            option_logits -= 1e6 * (1 - option_mask)
            logits.append(option_logits)
            if is_training:
                program_index = torch.unsqueeze(
                    split_program_ids[cur_step], dim=1)
            else:
                # constrain decoding
                if cur_step % 4 == 0 or (cur_step + 1) % 4 == 0:
                    # op round
                    option_logits -= 1e6 * self.seq_only_mask
                else:
                    # number round
                    option_logits -= 1e6 * self.op_only_mask
                if (cur_step + 1) % 4 == 0:
                    # ")" round
                    option_logits -= 1e6 * self.para_mask
                # print(program_index)
                program_index = torch.argmax(
                    option_logits, axis=-1, keepdim=True)
                program_index = torch.unsqueeze(
                    program_index, dim=1
                )
            if (cur_step + 1) % 4 == 0:
                # update op embeddings
                # after each completed step, replace the "#k" option embedding
                # with a vector derived from the current decoder context so
                # later steps can attend to this step's result
                this_step_index = cur_step // 4
                this_step_list_index = (
                    self.op_list + self.const_list).index("#" + str(this_step_index))
                this_step_mask = self.step_masks[this_step_index, :]
                decoder_step_vec = self.decoder_step_proj(
                    concat_input_embeddings)
                decoder_step_vec = self.decoder_step_proj_dropout(
                    decoder_step_vec)
                decoder_step_vec = torch.squeeze(decoder_step_vec)
                this_step_new_emb = decoder_step_vec  # [batch, hidden]
                this_step_new_emb = torch.unsqueeze(this_step_new_emb, 1)
                this_step_new_emb = this_step_new_emb.repeat(
                    1, self.reserved_token_size+self.input_length, 1)  # [batch, op seq, hidden]
                this_step_mask = torch.unsqueeze(
                    this_step_mask, 0)  # [1, op seq]
                # print(this_step_mask)
                this_step_mask = torch.unsqueeze(
                    this_step_mask, 2)  # [1, op seq, 1]
                this_step_mask = this_step_mask.repeat(
                    batch_size, 1, self.hidden_size)  # [batch, op seq, hidden]
                this_step_new_op_emb = torch.where(
                    this_step_mask > 0, this_step_new_emb, initial_option_embeddings)
            # print(program_index.size())
            # feed the chosen option's embedding back into the LSTM
            program_index = torch.repeat_interleave(
                program_index, self.hidden_size, dim=2)  # [batch, 1, hidden]
            input_program_embeddings = torch.gather(
                option_embeddings, dim=1, index=program_index)
            decoder_output, (decoder_state_h, decoder_state_c) = self.rnn(
                input_program_embeddings, (decoder_state_h, decoder_state_c))
            decoder_history = torch.cat(
                [decoder_history, input_program_embeddings], dim=1)
        logits = torch.stack(logits, dim=1)
        return logits
| 13,570 | 41.01548 | 119 | py |
FinQA | FinQA-main/code/retriever/Test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main script
"""
from tqdm import tqdm
import json
import os
from datetime import datetime
import time
import logging
from utils import *
from config import parameters as conf
from torch import nn
import torch
import torch.optim as optim
from Model import Bert_model
# Select tokenizer/config backend according to the configured pretrained model.
if conf.pretrained_model == "bert":
    from transformers import BertTokenizer
    from transformers import BertConfig
    tokenizer = BertTokenizer.from_pretrained(conf.model_size)
    model_config = BertConfig.from_pretrained(conf.model_size)
elif conf.pretrained_model == "roberta":
    from transformers import RobertaTokenizer
    from transformers import RobertaConfig
    tokenizer = RobertaTokenizer.from_pretrained(conf.model_size)
    model_config = RobertaConfig.from_pretrained(conf.model_size)
# inference-only run: results go into a fresh timestamped directory
saved_model_path = os.path.join(conf.output_path, conf.saved_model_path)
model_dir_name = datetime.now().strftime("%Y%m%d%H%M%S") + \
    "_" + conf.model_save_name
model_dir = os.path.join(
    conf.output_path, 'inference_only_' + model_dir_name)
results_path = os.path.join(model_dir, "results")
os.makedirs(results_path, exist_ok=False)
log_file = os.path.join(results_path, 'log.txt')
# Decoder vocabulary: special tokens + operators (rendered "op(") + constants.
op_list = read_txt(conf.op_list_file, log_file)
op_list = [op + '(' for op in op_list]
op_list = ['EOF', 'UNK', 'GO', ')'] + op_list
const_list = read_txt(conf.const_list_file, log_file)
const_list = [const.lower().replace('.', '_') for const in const_list]
reserved_token_size = len(op_list) + len(const_list)
test_data, test_examples, op_list, const_list = \
    read_examples(input_path=conf.test_file, tokenizer=tokenizer,
                  op_list=op_list, const_list=const_list, log_file=log_file)
kwargs = {"examples": test_examples,
          "tokenizer": tokenizer,
          "option": conf.option,
          "is_training": False,
          "max_seq_length": conf.max_seq_length,
          }
# NOTE(review): this reassignment is redundant -- kwargs already holds
# test_examples from the literal above.
kwargs["examples"] = test_examples
test_features = convert_examples_to_features(**kwargs)
def generate(data_ori, data, model, ksave_dir, mode='valid'):
    """Score every candidate with the retriever and write/evaluate the
    top-n retrieval predictions under ksave_dir/<mode>."""
    pred_list = []
    pred_unk = []
    ksave_dir_mode = os.path.join(ksave_dir, mode)
    os.makedirs(ksave_dir_mode, exist_ok=True)
    data_iterator = DataLoader(
        is_training=False, data=data, batch_size=conf.batch_size_test, shuffle=False)
    k = 0
    all_logits = []
    all_filename_id = []
    all_ind = []
    with torch.no_grad():
        for x in tqdm(data_iterator):
            input_ids = x['input_ids']
            input_mask = x['input_mask']
            segment_ids = x['segment_ids']
            label = x['label']
            filename_id = x["filename_id"]
            ind = x["ind"]
            # pad the final partial batch with zero rows up to batch_size_test
            ori_len = len(input_ids)
            for each_item in [input_ids, input_mask, segment_ids]:
                if ori_len < conf.batch_size_test:
                    each_len = len(each_item[0])
                    pad_x = [0] * each_len
                    each_item += [pad_x] * (conf.batch_size_test - ori_len)
            input_ids = torch.tensor(input_ids).to(conf.device)
            input_mask = torch.tensor(input_mask).to(conf.device)
            segment_ids = torch.tensor(segment_ids).to(conf.device)
            # NOTE(review): the first positional argument (is_training?) is
            # True here even though this is inference -- confirm against the
            # retriever Model's forward signature.
            logits = model(True, input_ids, input_mask,
                           segment_ids, device=conf.device)
            all_logits.extend(logits.tolist())
            all_filename_id.extend(filename_id)
            all_ind.extend(ind)
    output_prediction_file = os.path.join(ksave_dir_mode,
                                          "predictions.json")
    if mode == "valid":
        print_res = retrieve_evaluate(
            all_logits, all_filename_id, all_ind, output_prediction_file, conf.valid_file, topn=conf.topn)
    elif mode == "test":
        print_res = retrieve_evaluate(
            all_logits, all_filename_id, all_ind, output_prediction_file, conf.test_file, topn=conf.topn)
    else:
        # private data mode
        print_res = retrieve_evaluate_private(
            all_logits, all_filename_id, all_ind, output_prediction_file, conf.test_file, topn=conf.topn)
    write_log(log_file, print_res)
    print(print_res)
    return
def generate_test():
    """Build the retriever model, load trained weights, and run inference
    on the test split."""
    model = Bert_model(hidden_size=model_config.hidden_size,
                       dropout_rate=conf.dropout_rate,)
    model = nn.DataParallel(model)
    model.to(conf.device)
    # NOTE(review): loads conf.saved_model_path directly, while the
    # module-level saved_model_path joins it with conf.output_path --
    # confirm which path is intended.
    model.load_state_dict(torch.load(conf.saved_model_path))
    model.eval()
    generate(test_data, test_features, model, results_path, mode='test')
# Entry point: this script only runs inference with a saved retriever model.
if __name__ == '__main__':
    generate_test()
| 4,590 | 31.104895 | 106 | py |
FinQA | FinQA-main/code/retriever/utils.py | import time
import os
import sys
import shutil
import io
import subprocess
import re
import zipfile
import json
import copy
import torch
import random
import collections
import math
import numpy as np
import torch.nn.functional as F
from config import parameters as conf
from tqdm import tqdm
from transformers import BertTokenizer, BertModel, BertConfig
import finqa_utils as finqa_utils
# Progress bar
TOTAL_BAR_LENGTH = 100.
last_time = time.time()
begin_time = last_time
# Query the terminal width via the standard library instead of shelling out
# to `stty size`, which crashes at import time (empty output -> unpack
# ValueError) whenever the process is not attached to a real terminal, e.g.
# under nohup, a scheduler, or test runners. get_terminal_size falls back to
# the COLUMNS env var and finally an 80x24 default.
term_width = shutil.get_terminal_size().columns
def progress_bar(current, total, msg=None):
    """Render a one-line console progress bar for step *current* of *total*.

    Uses the module globals last_time/begin_time to show per-step and total
    elapsed time; rewrites the line in place (carriage return) until the
    final step.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time() # Reset for new bar.
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    cur_time = time.time()
    step_time = cur_time - last_time
    last_time = cur_time
    tot_time = cur_time - begin_time
    L = []
    L.append(' Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # pad with spaces to overwrite leftovers from a previous, longer line
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    if current < total-1:
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
def format_time(seconds):
    """Format a duration in seconds as a compact string.

    At most the two most-significant non-zero units are shown, drawn in
    order from days ('D'), hours ('h'), minutes ('m'), seconds ('s') and
    milliseconds ('ms'). Returns '0ms' for a zero/near-zero duration.
    """
    remaining = seconds
    components = []
    for unit_seconds, suffix in ((3600 * 24, 'D'), (3600, 'h'), (60, 'm')):
        count = int(remaining / unit_seconds)
        remaining = remaining - count * unit_seconds
        components.append((count, suffix))
    whole_seconds = int(remaining)
    remaining = remaining - whole_seconds
    components.append((whole_seconds, 's'))
    components.append((int(remaining * 1000), 'ms'))

    parts = []
    for count, suffix in components:
        # Keep at most two printed units, skipping any zero-valued ones.
        if count > 0 and len(parts) < 2:
            parts.append(str(count) + suffix)
    return ''.join(parts) if parts else '0ms'
def write_word(pred_list, save_dir, name):
    """Write predictions to save_dir + name, one space-joined line per item.

    pred_list: iterable of token lists; each inner list becomes one line.
    save_dir: directory prefix (concatenated directly, so it should end
        with a path separator, matching the original call convention).
    name: file name appended to save_dir.

    Fix: the file handle was previously opened and never closed; a context
    manager now guarantees the data is flushed and the handle released.
    """
    with open(save_dir + name, "w+") as ss:
        for item in pred_list:
            ss.write(" ".join(item) + '\n')
def get_current_git_version():
    """Return the hex SHA of the current git HEAD.

    Imports GitPython lazily so the dependency is only needed when this
    helper is actually called; searches parent directories for the repo
    root, so it works from anywhere inside the checkout.
    """
    import git
    repo = git.Repo(search_parent_directories=True)
    sha = repo.head.object.hexsha
    return sha
def write_log(log_file, s):
    """Echo *s* to stdout and append it as one line to *log_file*."""
    print(s)
    with open(log_file, 'a') as handle:
        handle.write(s + '\n')
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def read_txt(input_path, log_file):
    """Read a text file into a list of stripped lines, logging the path."""
    write_log(log_file, "Reading: %s" % input_path)
    with open(input_path) as input_file:
        return [line.strip() for line in input_file.readlines()]
def read_examples(input_path, tokenizer, op_list, const_list, log_file):
    """Read a json file into (raw data, parsed examples, op_list, const_list)."""
    write_log(log_file, "Reading " + input_path)
    with open(input_path) as input_file:
        input_data = json.load(input_file)
    examples = [finqa_utils.read_mathqa_entry(entry, tokenizer)
                for entry in input_data]
    return input_data, examples, op_list, const_list
def convert_examples_to_features(examples,
                                 tokenizer,
                                 max_seq_length,
                                 option,
                                 is_training,
                                 ):
    """Converts a list of DropExamples into InputFeatures.

    Returns two flat lists: the positive features and the negative
    features produced by every example's convert_single_example().
    """
    positive_features = []
    negative_features = []
    for example_index, example in tqdm(enumerate(examples)):
        feats, feats_neg = example.convert_single_example(
            tokenizer=tokenizer,
            max_seq_length=max_seq_length,
            option=option,
            is_training=is_training,
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token)
        positive_features.extend(feats)
        negative_features.extend(feats_neg)
    return positive_features, negative_features
def write_predictions(all_predictions, output_prediction_file):
    """Writes final predictions in json format (indent=4, trailing newline)."""
    serialized = json.dumps(all_predictions, indent=4)
    with open(output_prediction_file, "w") as writer:
        writer.write(serialized + "\n")
class DataLoader:
    """Batch iterator over (positive, negative) retriever feature lists.

    In training mode the negatives are shuffled and (unless conf.option is
    "tfidf") subsampled to conf.neg_rate negatives per positive; the last
    partial batch is dropped. In evaluation mode every feature is used and
    the final partial batch is kept.
    """

    def __init__(self, is_training, data, batch_size=64, shuffle=True):
        """
        Main dataloader
        """
        self.data_pos, self.data_neg = data[0], data[1]
        self.batch_size = batch_size
        self.is_training = is_training

        if self.is_training:
            random.shuffle(self.data_neg)
            if conf.option == "tfidf":
                self.data = self.data_pos + self.data_neg
            else:
                num_neg = len(self.data_pos) * conf.neg_rate
                self.data = self.data_pos + self.data_neg[:num_neg]
        else:
            self.data = self.data_pos + self.data_neg

        self.data_size = len(self.data)
        full_batches, remainder = divmod(self.data_size, batch_size)
        self.num_batches = full_batches + (1 if remainder else 0)
        self.count = 0

    def __iter__(self):
        return self

    def __next__(self):
        # During training the final (possibly partial) batch is dropped.
        bound = self.num_batches - 1 if self.is_training else self.num_batches
        if self.count >= bound:
            raise StopIteration
        return self.get_batch()

    def __len__(self):
        return self.num_batches

    def reset(self):
        self.count = 0
        self.shuffle_all_data()

    def shuffle_all_data(self):
        if conf.option == "tfidf":
            random.shuffle(self.data)
        else:
            # Re-draw the negative subsample each epoch, then shuffle the mix.
            random.shuffle(self.data_neg)
            num_neg = len(self.data_pos) * conf.neg_rate
            self.data = self.data_pos + self.data_neg[:num_neg]
            random.shuffle(self.data)
        return

    def get_batch(self):
        start_index = self.count * self.batch_size
        end_index = min(start_index + self.batch_size, self.data_size)
        self.count += 1
        keys = ("input_ids", "input_mask", "segment_ids",
                "filename_id", "label", "ind")
        batch_data = {key: [] for key in keys}
        for each_data in self.data[start_index:end_index]:
            for key in keys:
                batch_data[key].append(each_data[key])
        return batch_data
def cleanhtml(raw_html):
    """Strip HTML/XML tags (non-greedy <...> spans) from a string."""
    return re.sub('<.*?>', '', raw_html)
def retrieve_evaluate(all_logits, all_filename_ids, all_inds, output_prediction_file, ori_file, topn):
    '''
    Attach ranked retrieval results to every entry of ori_file, save them to
    output_prediction_file, and return top-3 / top-n recall as a string.
    Each logit's positive-class score (logit[1]) ranks fact index *ind*
    within its document *filename_id*; duplicate inds are kept once.
    '''
    scored_by_file = {}
    seen_inds = {}
    for logit, filename_id, ind in zip(all_logits, all_filename_ids, all_inds):
        if filename_id not in scored_by_file:
            scored_by_file[filename_id] = []
            seen_inds[filename_id] = set()
        if ind not in seen_inds[filename_id]:
            scored_by_file[filename_id].append({
                "score": logit[1],
                "ind": ind
            })
            seen_inds[filename_id].add(ind)

    with open(ori_file) as f:
        data_all = json.load(f)

    all_recall = 0.0
    all_recall_3 = 0.0

    for data in data_all:
        ranked = sorted(scored_by_file[data["id"]],
                        key=lambda kv: kv["score"], reverse=True)
        gold_inds = data["qa"]["gold_inds"]

        # Split retrieved facts by modality (table rows vs. text sentences),
        # both for the top-n cut and for the full ranked list.
        table_retrieved = [t for t in ranked[:topn] if "table" in t["ind"]]
        text_retrieved = [t for t in ranked[:topn] if "table" not in t["ind"]]
        table_re_all = [t for t in ranked if "table" in t["ind"]]
        text_re_all = [t for t in ranked if "table" not in t["ind"]]

        correct = sum(1 for t in ranked[:topn] if t["ind"] in gold_inds)
        correct_3 = sum(1 for t in ranked[:3] if t["ind"] in gold_inds)
        all_recall += float(correct) / len(gold_inds)
        all_recall_3 += float(correct_3) / len(gold_inds)

        data["table_retrieved"] = table_retrieved
        data["text_retrieved"] = text_retrieved
        data["table_retrieved_all"] = table_re_all
        data["text_retrieved_all"] = text_re_all

    with open(output_prediction_file, "w") as f:
        json.dump(data_all, f, indent=4)

    res_3 = all_recall_3 / len(data_all)
    res = all_recall / len(data_all)
    res = "Top 3: " + str(res_3) + "\n" + "Top 5: " + str(res) + "\n"
    return res
def retrieve_evaluate_private(all_logits, all_filename_ids, all_inds, output_prediction_file, ori_file, topn):
    '''
    Save retrieval results for the private (unlabeled) test set.

    Unlike retrieve_evaluate, no gold indices exist here, so no recall is
    computed: the function only attaches the ranked retrieval results to
    every entry of ori_file and writes the result to output_prediction_file.

    Fix: the top-n table results are now stored under "table_retrieved"
    (previously misspelled "table_restrieved"), consistent with
    retrieve_evaluate and with the "_all" keys written below.
    '''
    res_filename = {}
    res_filename_inds = {}
    # Group scores per document, keeping the first occurrence of each ind.
    for this_logit, this_filename_id, this_ind in zip(all_logits, all_filename_ids, all_inds):
        if this_filename_id not in res_filename:
            res_filename[this_filename_id] = []
            res_filename_inds[this_filename_id] = []
        if this_ind not in res_filename_inds[this_filename_id]:
            res_filename[this_filename_id].append({
                "score": this_logit[1],
                "ind": this_ind
            })
            res_filename_inds[this_filename_id].append(this_ind)

    with open(ori_file) as f:
        data_all = json.load(f)

    for data in data_all:
        this_filename_id = data["id"]
        this_res = res_filename[this_filename_id]
        sorted_dict = sorted(this_res, key=lambda kv: kv["score"], reverse=True)

        # top-n results split by modality (table rows vs. text sentences)
        table_retrieved = []
        text_retrieved = []
        # the full ranked lists, kept for later re-ranking experiments
        table_re_all = []
        text_re_all = []
        for tmp in sorted_dict[:topn]:
            if "table" in tmp["ind"]:
                table_retrieved.append(tmp)
            else:
                text_retrieved.append(tmp)
        for tmp in sorted_dict:
            if "table" in tmp["ind"]:
                table_re_all.append(tmp)
            else:
                text_re_all.append(tmp)

        data["table_retrieved"] = table_retrieved
        data["text_retrieved"] = text_retrieved
        data["table_retrieved_all"] = table_re_all
        data["text_retrieved_all"] = text_re_all

    with open(output_prediction_file, "w") as f:
        json.dump(data_all, f, indent=4)

    return "private, no res"
if __name__ == '__main__':
    # tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # text = "this is a -10"
    # res = tokenize(tokenizer, text, apply_basic_tokenization=False)
    # text = "<a>test test</a>"
    # print(cleanhtml(text))
    root_path = "/mnt/george_bhd/zhiyuchen/"
    outputs = root_path + "outputs/"
    json_in = outputs + "test_20210408011241/results/loads/1/valid/nbest_predictions.json"
    # NOTE(review): stale ad-hoc smoke test — retrieve_evaluate takes six
    # arguments, so running this module directly would raise TypeError here.
    # Kept as-is since it documents the expected prediction-file layout.
    retrieve_evaluate(json_in)
| 13,173 | 26.106996 | 110 | py |
FinQA | FinQA-main/code/retriever/Main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main script
"""
from tqdm import tqdm
import json
import os
from datetime import datetime
import time
import logging
from utils import *
from config import parameters as conf
from torch import nn
import torch
import torch.optim as optim
from Model import Bert_model
# Select tokenizer/config classes matching the configured encoder family.
if conf.pretrained_model == "bert":
    from transformers import BertTokenizer
    from transformers import BertConfig
    tokenizer = BertTokenizer.from_pretrained(conf.model_size)
    model_config = BertConfig.from_pretrained(conf.model_size)
elif conf.pretrained_model == "roberta":
    from transformers import RobertaTokenizer
    from transformers import RobertaConfig
    tokenizer = RobertaTokenizer.from_pretrained(conf.model_size)
    model_config = RobertaConfig.from_pretrained(conf.model_size)

# create output paths
# Training runs get a timestamped model dir with results/ and saved_model/;
# inference-only runs reuse a saved model and write to inference_only_<ts>/.
if conf.mode == "train":
    model_dir_name = conf.model_save_name + "_" + \
        datetime.now().strftime("%Y%m%d%H%M%S")
    model_dir = os.path.join(conf.output_path, model_dir_name)
    results_path = os.path.join(model_dir, "results")
    saved_model_path = os.path.join(model_dir, "saved_model")
    os.makedirs(saved_model_path, exist_ok=False)
    os.makedirs(results_path, exist_ok=False)
    log_file = os.path.join(results_path, 'log.txt')
else:
    saved_model_path = os.path.join(conf.output_path, conf.saved_model_path)
    model_dir_name = datetime.now().strftime("%Y%m%d%H%M%S")
    model_dir = os.path.join(
        conf.output_path, 'inference_only_' + model_dir_name)
    results_path = os.path.join(model_dir, "results")
    os.makedirs(results_path, exist_ok=False)
    log_file = os.path.join(results_path, 'log.txt')

# Build the operator/constant vocabularies shared with the program generator.
op_list = read_txt(conf.op_list_file, log_file)
op_list = [op + '(' for op in op_list]
op_list = ['EOF', 'UNK', 'GO', ')'] + op_list
const_list = read_txt(conf.const_list_file, log_file)
const_list = [const.lower().replace('.', '_') for const in const_list]
reserved_token_size = len(op_list) + len(const_list)

# Load the raw json splits and convert every example into model features.
train_data, train_examples, op_list, const_list = \
    read_examples(input_path=conf.train_file, tokenizer=tokenizer,
                  op_list=op_list, const_list=const_list, log_file=log_file)
valid_data, valid_examples, op_list, const_list = \
    read_examples(input_path=conf.valid_file, tokenizer=tokenizer,
                  op_list=op_list, const_list=const_list, log_file=log_file)
test_data, test_examples, op_list, const_list = \
    read_examples(input_path=conf.test_file, tokenizer=tokenizer,
                  op_list=op_list, const_list=const_list, log_file=log_file)

kwargs = {"examples": train_examples,
          "tokenizer": tokenizer,
          "option": conf.option,
          "is_training": True,
          "max_seq_length": conf.max_seq_length,
          }

train_features = convert_examples_to_features(**kwargs)
kwargs["examples"] = valid_examples
kwargs["is_training"] = False
valid_features = convert_examples_to_features(**kwargs)
kwargs["examples"] = test_examples
test_features = convert_examples_to_features(**kwargs)
def train():
    """Fine-tune the retriever model.

    Logs all config parameters, then trains with Adam and per-token
    cross entropy, reporting the running loss every conf.report_loss
    steps and checkpointing + validating every conf.report steps.
    Uses module-level globals: conf, model_config, train_features,
    valid_examples/valid_features, saved_model_path, results_path, log_file.
    """
    # keep track of all input parameters
    write_log(log_file, "####################INPUT PARAMETERS###################")
    for attr in conf.__dict__:
        value = conf.__dict__[attr]
        write_log(log_file, attr + " = " + str(value))
    write_log(log_file, "#######################################################")
    model = Bert_model(hidden_size=model_config.hidden_size,
                       dropout_rate=conf.dropout_rate,)
    model = nn.DataParallel(model)
    model.to(conf.device)
    optimizer = optim.Adam(model.parameters(), conf.learning_rate)
    # ignore_index=-1 skips padded labels when summing the loss
    criterion = nn.CrossEntropyLoss(reduction='none', ignore_index=-1)
    model.train()
    train_iterator = DataLoader(
        is_training=True, data=train_features, batch_size=conf.batch_size, shuffle=True)
    k = 0                 # global step counter
    record_k = 0          # steps since the last loss report
    record_loss_k = 0
    loss, start_time = 0.0, time.time()
    record_loss = 0.0     # accumulated (scaled) loss since the last report
    for _ in range(conf.epoch):
        train_iterator.reset()
        for x in train_iterator:
            input_ids = torch.tensor(x['input_ids']).to(conf.device)
            input_mask = torch.tensor(x['input_mask']).to(conf.device)
            segment_ids = torch.tensor(x['segment_ids']).to(conf.device)
            label = torch.tensor(x['label']).to(conf.device)
            model.zero_grad()
            optimizer.zero_grad()
            this_logits = model(True, input_ids, input_mask,
                                segment_ids, device=conf.device)
            this_loss = criterion(
                this_logits.view(-1, this_logits.shape[-1]), label.view(-1))
            this_loss = this_loss.sum()
            record_loss += this_loss.item() * 100
            record_k += 1
            k += 1
            this_loss.backward()
            optimizer.step()
            if k > 1 and k % conf.report_loss == 0:
                write_log(log_file, "%d : loss = %.3f" %
                          (k, record_loss / record_k))
                record_loss = 0.0
                record_k = 0
            if k > 1 and k % conf.report == 0:
                print("Round: ", k / conf.report)
                model.eval()
                cost_time = time.time() - start_time
                write_log(log_file, "%d : time = %.3f " %
                          (k // conf.report, cost_time))
                start_time = time.time()
                if k // conf.report >= 1:
                    print("Val test")
                    # save model
                    saved_model_path_cnt = os.path.join(
                        saved_model_path, 'loads', str(k // conf.report))
                    os.makedirs(saved_model_path_cnt, exist_ok=True)
                    torch.save(model.state_dict(),
                               saved_model_path_cnt + "/model.pt")
                    results_path_cnt = os.path.join(
                        results_path, 'loads', str(k // conf.report))
                    os.makedirs(results_path_cnt, exist_ok=True)
                    validation_result = evaluate(
                        valid_examples, valid_features, model, results_path_cnt, 'valid')
                    # write_log(log_file, validation_result)
                model.train()
def evaluate(data_ori, data, model, ksave_dir, mode='valid'):
    """Run retrieval inference over *data* and write ranked predictions.

    data_ori: original examples (unused here beyond the interface).
    data: (positive, negative) feature lists consumed by DataLoader.
    model: trained retriever; called in no-grad mode.
    ksave_dir: output directory; a <mode>/ subdir receives predictions.json.
    mode: 'valid' or 'test' — selects which gold file recall is computed on.

    Returns None; the recall summary is logged via write_log.
    """
    pred_list = []
    pred_unk = []
    ksave_dir_mode = os.path.join(ksave_dir, mode)
    os.makedirs(ksave_dir_mode, exist_ok=True)
    data_iterator = DataLoader(
        is_training=False, data=data, batch_size=conf.batch_size_test, shuffle=False)
    k = 0
    all_logits = []
    all_filename_id = []
    all_ind = []
    with torch.no_grad():
        for x in tqdm(data_iterator):
            input_ids = x['input_ids']
            input_mask = x['input_mask']
            segment_ids = x['segment_ids']
            label = x['label']
            filename_id = x["filename_id"]
            ind = x["ind"]
            ori_len = len(input_ids)
            # Pad the final partial batch up to batch_size_test with zero
            # rows; the extra logits are harmless because filename_id/ind
            # are only extended for the real ori_len items... padded logits
            # ARE extended below, but they carry no filename/ind pair beyond
            # the originals collected here, so zip() in retrieve_evaluate
            # drops them.
            for each_item in [input_ids, input_mask, segment_ids]:
                if ori_len < conf.batch_size_test:
                    each_len = len(each_item[0])
                    pad_x = [0] * each_len
                    each_item += [pad_x] * (conf.batch_size_test - ori_len)
            input_ids = torch.tensor(input_ids).to(conf.device)
            input_mask = torch.tensor(input_mask).to(conf.device)
            segment_ids = torch.tensor(segment_ids).to(conf.device)
            logits = model(True, input_ids, input_mask,
                           segment_ids, device=conf.device)
            all_logits.extend(logits.tolist())
            all_filename_id.extend(filename_id)
            all_ind.extend(ind)
    output_prediction_file = os.path.join(ksave_dir_mode,
                                          "predictions.json")
    if mode == "valid":
        print_res = retrieve_evaluate(
            all_logits, all_filename_id, all_ind, output_prediction_file, conf.valid_file, topn=conf.topn)
    else:
        print_res = retrieve_evaluate(
            all_logits, all_filename_id, all_ind, output_prediction_file, conf.test_file, topn=conf.topn)
    write_log(log_file, print_res)
    print(print_res)
    return
# Script entry point: this module always trains; periodic validation is
# performed inside train() itself.
if __name__ == '__main__':
    train()
| 8,275 | 34.519313 | 106 | py |
FinQA | FinQA-main/code/retriever/Model.py | import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
import math
import numpy as np
from config import parameters as conf
if conf.pretrained_model == "bert":
from transformers import BertModel
elif conf.pretrained_model == "roberta":
from transformers import RobertaModel
class Bert_model(nn.Module):
    """Binary retriever: BERT/RoBERTa encoder + 2-way classification head.

    The [CLS]-position embedding is linearly projected, dropped out and
    mapped to two logits (irrelevant / relevant) per question+fact pair.
    The encoder family and checkpoint come from the module-level `conf`.
    """

    def __init__(self, hidden_size, dropout_rate):
        super(Bert_model, self).__init__()

        self.hidden_size = hidden_size

        if conf.pretrained_model == "bert":
            self.bert = BertModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        elif conf.pretrained_model == "roberta":
            self.bert = RobertaModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)

        self.cls_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.cls_dropout = nn.Dropout(dropout_rate)

        self.cls_final = nn.Linear(hidden_size, 2, bias=True)

    def forward(self, is_training, input_ids, input_mask, segment_ids, device):
        # `is_training` and `device` are unused here; kept for interface
        # compatibility with the train/evaluate loops in Main.py.
        bert_outputs = self.bert(
            input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)

        bert_sequence_output = bert_outputs.last_hidden_state
        # [CLS] token representation (first position of the sequence output)
        bert_pooled_output = bert_sequence_output[:, 0, :]

        pooled_output = self.cls_prj(bert_pooled_output)
        pooled_output = self.cls_dropout(pooled_output)

        logits = self.cls_final(pooled_output)

        return logits
| 1,504 | 29.1 | 87 | py |
hawp | hawp-master/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
    """Collect the C++/CUDA sources under parsing/csrc into the parsing._C
    extension, choosing CUDAExtension whenever a CUDA toolkit is installed
    (CUDA_HOME set), and CppExtension otherwise."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "parsing", "csrc")

    sources = glob.glob(os.path.join(extensions_dir, "*.cpp"))
    sources += glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
    source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))

    extension = CppExtension
    extra_compile_args = {"cxx": []}
    define_macros = []

    # Build the CUDA variant based on the toolkit alone (not on a GPU being
    # visible at build time).
    if CUDA_HOME is not None:
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [("WITH_CUDA", None)]
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]

    # glob already returns absolute paths here, so this join is a no-op,
    # preserved for parity with the original build script.
    sources = [os.path.join(extensions_dir, s) for s in sources]

    return [
        extension(
            "parsing._C",
            sources,
            include_dirs=[extensions_dir],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
# Package metadata plus the native-extension build hook. The compiled module
# is exposed as `parsing._C`; the packaging/requirements lines were left
# disabled by the upstream authors (the extension is the only build product).
setup(
    name="hawp",
    version="0.1",
    author="nxue",
    description="holistically-attracted wireframe parsing (in pytorch)",
    # packages=find_packages(exclude=("configs", "tests",)),
    # install_requires=requirements,
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 2,037 | 27.305556 | 73 | py |
hawp | hawp-master/parsing/detector.py | import torch
from torch import nn
from parsing.backbones import build_backbone
from parsing.encoder.hafm import HAFMencoder
# from epnet.structures.linelist_ops import linesegment_distance
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import time
# Release asset for the pretrained HAWP checkpoint consumed by
# get_hawp_model(pretrained=True); the md5 is informational only
# (it is not verified at download time).
PRETRAINED = {
    'url': 'https://github.com/cherubicXN/hawp-torchhub/releases/download/0.1/model-hawp-hg-5d31f70.pth',
    'md5': '5d31f70a6c2477ea7b24e7da96e7b97d',
}
def cross_entropy_loss_for_junction(logits, positive):
    """Mean pixel-wise 2-class cross entropy.

    logits: (N, 2, H, W) raw scores for (background, junction).
    positive: broadcastable (N, 1, H, W) mask; 1 marks junction pixels.
    """
    neg_log_prob = -F.log_softmax(logits, dim=1)
    per_pixel = positive * neg_log_prob[:, None, 1] \
        + (1 - positive) * neg_log_prob[:, None, 0]
    return per_pixel.mean()
def sigmoid_l1_loss(logits, targets, offset = 0.0, mask=None):
    """Mean L1 loss between sigmoid(logits)+offset and targets.

    When *mask* is given, each pixel is re-weighted by mask divided by the
    per-(N, C) spatial mean of the mask (zero means replaced by 1 to avoid
    division by zero).
    """
    predictions = torch.sigmoid(logits) + offset
    diff = torch.abs(predictions - targets)
    if mask is not None:
        weight = mask.mean(3, True).mean(2, True)
        weight[weight == 0] = 1
        diff = diff * (mask / weight)
    return diff.mean()
def non_maximum_suppression(a):
    """Keep only values that are the maximum of their 3x3 neighborhood;
    every other position is zeroed."""
    pooled = F.max_pool2d(a, 3, stride=1, padding=1)
    keep = (a == pooled).float().clamp(min=0.0)
    return a * keep
def get_junctions(jloc, joff, topk = 300, th=0):
    """Pick the top-k junction candidates from a heatmap.

    jloc: (1, H, W) junction likelihood map.
    joff: (2, H, W) sub-pixel x/y offsets for each cell.
    Returns (junctions, scores) as (x, y) coordinates (cell center + offset)
    for candidates whose score exceeds *th*.
    """
    height, width = jloc.size(1), jloc.size(2)
    flat_scores = jloc.reshape(-1)
    flat_offsets = joff.reshape(2, -1)

    scores, index = torch.topk(flat_scores, k=topk)
    ys = (index // width).float() + torch.gather(flat_offsets[1], 0, index) + 0.5
    xs = (index % width).float() + torch.gather(flat_offsets[0], 0, index) + 0.5

    junctions = torch.stack((xs, ys)).t()
    keep = scores > th
    return junctions[keep], scores[keep]
class WireframeDetector(nn.Module):
    """Holistically-Attracted Wireframe Parser (HAWP).

    Pipeline: a backbone predicts per-pixel HAFM maps (md/dis/res), a
    junction heatmap (jloc) and sub-pixel junction offsets (joff); line
    proposals decoded from the HAFM maps are snapped to detected junctions
    and then verified by a line-of-interest (LOI) pooling head (fc1 + fc2).
    Training and inference follow different paths (forward_train /
    forward_test).
    """
    def __init__(self, cfg):
        super(WireframeDetector, self).__init__()
        self.hafm_encoder = HAFMencoder(cfg)
        self.backbone = build_backbone(cfg)
        # Dynamic sampling budgets for junctions / positive / negative /
        # random line proposals during training.
        self.n_dyn_junc = cfg.MODEL.PARSING_HEAD.N_DYN_JUNC
        self.n_dyn_posl = cfg.MODEL.PARSING_HEAD.N_DYN_POSL
        self.n_dyn_negl = cfg.MODEL.PARSING_HEAD.N_DYN_NEGL
        self.n_dyn_othr = cfg.MODEL.PARSING_HEAD.N_DYN_OTHR
        self.n_dyn_othr2= cfg.MODEL.PARSING_HEAD.N_DYN_OTHR2
        # n_pts0 points are sampled along each line, max-pooled down to n_pts1.
        self.n_pts0 = cfg.MODEL.PARSING_HEAD.N_PTS0
        self.n_pts1 = cfg.MODEL.PARSING_HEAD.N_PTS1
        self.dim_loi = cfg.MODEL.PARSING_HEAD.DIM_LOI
        self.dim_fc = cfg.MODEL.PARSING_HEAD.DIM_FC
        self.n_out_junc = cfg.MODEL.PARSING_HEAD.N_OUT_JUNC
        self.n_out_line = cfg.MODEL.PARSING_HEAD.N_OUT_LINE
        self.use_residual = cfg.MODEL.PARSING_HEAD.USE_RESIDUAL
        # self.
        # Interpolation parameters t in [0,1] for sampling along a line.
        # NOTE(review): the .cuda() here forces GPU construction — confirm
        # before attempting CPU-only inference.
        self.register_buffer('tspan', torch.linspace(0, 1, self.n_pts0)[None,None,:].cuda())
        self.loss = nn.BCEWithLogitsLoss(reduction='none')
        self.fc1 = nn.Conv2d(256, self.dim_loi, 1)
        self.pool1d = nn.MaxPool1d(self.n_pts0//self.n_pts1, self.n_pts0//self.n_pts1)
        self.fc2 = nn.Sequential(
            nn.Linear(self.dim_loi * self.n_pts1, self.dim_fc),
            nn.ReLU(inplace=True),
            nn.Linear(self.dim_fc, self.dim_fc),
            nn.ReLU(inplace=True),
            nn.Linear(self.dim_fc, 1),
            )
        self.train_step = 0
    def pooling(self, features_per_image, lines_per_im):
        """LOI pooling: bilinearly sample features along each line and score it.

        features_per_image: (dim_loi, H, W) feature map from fc1.
        lines_per_im: (M, 4) lines as (x1, y1, x2, y2).
        Returns (M,) raw verification logits.
        """
        h,w = features_per_image.size(1), features_per_image.size(2)
        U,V = lines_per_im[:,:2], lines_per_im[:,2:]
        # Sample n_pts0 points linearly interpolated between the endpoints.
        sampled_points = U[:,:,None]*self.tspan + V[:,:,None]*(1-self.tspan) -0.5
        sampled_points = sampled_points.permute((0,2,1)).reshape(-1,2)
        px,py = sampled_points[:,0],sampled_points[:,1]
        px0 = px.floor().clamp(min=0, max=w-1)
        py0 = py.floor().clamp(min=0, max=h-1)
        px1 = (px0 + 1).clamp(min=0, max=w-1)
        py1 = (py0 + 1).clamp(min=0, max=h-1)
        px0l, py0l, px1l, py1l = px0.long(), py0.long(), px1.long(), py1.long()
        # Bilinear interpolation over the four surrounding cells.
        xp = ((features_per_image[:, py0l, px0l] * (py1-py) * (px1 - px)+ features_per_image[:, py1l, px0l] * (py - py0) * (px1 - px)+ features_per_image[:, py0l, px1l] * (py1 - py) * (px - px0)+ features_per_image[:, py1l, px1l] * (py - py0) * (px - px0)).reshape(self.dim_loi,-1,self.n_pts0)
    ).permute(1,0,2)
        # if self.pool1d is not None:
        xp = self.pool1d(xp)
        features_per_line = xp.view(-1, self.n_pts1*self.dim_loi)
        logits = self.fc2(features_per_line).flatten()
        return logits
    def forward(self, images, annotations = None):
        # Dispatch on nn.Module training mode.
        if self.training:
            return self.forward_train(images, annotations=annotations)
        else:
            return self.forward_test(images, annotations=annotations)
    def forward_test(self, images, annotations = None):
        """Single-image inference; returns (output dict, timing info)."""
        device = images.device
        extra_info = {
            'time_backbone': 0.0,
            'time_proposal': 0.0,
            'time_matching': 0.0,
            'time_verification': 0.0,
        }
        extra_info['time_backbone'] = time.time()
        outputs, features = self.backbone(images)
        loi_features = self.fc1(features)
        output = outputs[0]
        # Channel layout of the backbone head: 0-2 md, 3 dis, 4 res,
        # 5-6 jloc (2-class), 7-8 joff.
        md_pred = output[:,:3].sigmoid()
        # dis_pred = targets['dis']
        dis_pred = output[:,3:4].sigmoid()
        res_pred = output[:,4:5].sigmoid()
        jloc_pred= output[:,5:7].softmax(1)[:,1:]
        joff_pred= output[:,7:9].sigmoid() - 0.5
        extra_info['time_backbone'] = time.time() - extra_info['time_backbone']
        batch_size = md_pred.size(0)
        assert batch_size == 1
        extra_info['time_proposal'] = time.time()
        if self.use_residual:
            lines_pred = self.proposal_lines_new(md_pred[0],dis_pred[0],res_pred[0]).view(-1,4)
        else:
            lines_pred = self.proposal_lines_new(md_pred[0], dis_pred[0], None).view(-1, 4)
        jloc_pred_nms = non_maximum_suppression(jloc_pred[0])
        topK = min(300, int((jloc_pred_nms>0.008).float().sum().item()))
        juncs_pred, _ = get_junctions(non_maximum_suppression(jloc_pred[0]),joff_pred[0], topk=topK)
        extra_info['time_proposal'] = time.time() - extra_info['time_proposal']
        extra_info['time_matching'] = time.time()
        # Snap each proposal endpoint to its nearest detected junction.
        dis_junc_to_end1, idx_junc_to_end1 = torch.sum((lines_pred[:,:2]-juncs_pred[:,None])**2,dim=-1).min(0)
        dis_junc_to_end2, idx_junc_to_end2 = torch.sum((lines_pred[:,2:] - juncs_pred[:, None]) ** 2, dim=-1).min(0)
        idx_junc_to_end_min = torch.min(idx_junc_to_end1,idx_junc_to_end2)
        idx_junc_to_end_max = torch.max(idx_junc_to_end1,idx_junc_to_end2)
        # Keep proposals whose two endpoints snap to distinct junctions.
        iskeep = (idx_junc_to_end_min < idx_junc_to_end_max)# * (dis_junc_to_end1< 10*10)*(dis_junc_to_end2<10*10) # *(dis_junc_to_end2<100)
        idx_lines_for_junctions = torch.unique(
            torch.cat((idx_junc_to_end_min[iskeep,None],idx_junc_to_end_max[iskeep,None]),dim=1),
            dim=0)
        lines_adjusted = torch.cat((juncs_pred[idx_lines_for_junctions[:,0]], juncs_pred[idx_lines_for_junctions[:,1]]),dim=1)
        extra_info['time_matching'] = time.time() - extra_info['time_matching']
        extra_info['time_verification'] = time.time()
        scores = self.pooling(loi_features[0],lines_adjusted).sigmoid()
        lines_final = lines_adjusted[scores>0.05]
        score_final = scores[scores>0.05]
        sarg = torch.argsort(score_final,descending=True)
        juncs_final = juncs_pred[idx_lines_for_junctions.unique()]
        # `_` still holds the junction scores returned by get_junctions above.
        juncs_score = _[idx_lines_for_junctions.unique()]
        extra_info['time_verification'] = time.time() - extra_info['time_verification']
        # Rescale from feature-map resolution back to the input image size.
        sx = annotations[0]['width']/output.size(3)
        sy = annotations[0]['height']/output.size(2)
        lines_final[:,0] *= sx
        lines_final[:,1] *= sy
        lines_final[:,2] *= sx
        lines_final[:,3] *= sy
        juncs_final[:,0] *= sx
        juncs_final[:,1] *= sy
        output = {
            'lines_pred': lines_final,
            'lines_score': score_final,
            'juncs_pred': juncs_final,
            'juncs_score': juncs_score,
            'num_proposals': lines_adjusted.size(0),
            'filename': annotations[0]['filename'],
            'width': annotations[0]['width'],
            'height': annotations[0]['height'],
        }
        return output, extra_info
    def forward_train(self, images, annotations = None):
        """Training pass; returns (loss dict, extra info dict)."""
        device = images.device
        targets , metas = self.hafm_encoder(annotations)
        self.train_step += 1
        outputs, features = self.backbone(images)
        loss_dict = {
            'loss_md': 0.0,
            'loss_dis': 0.0,
            'loss_res': 0.0,
            'loss_jloc': 0.0,
            'loss_joff': 0.0,
            'loss_pos': 0.0,
            'loss_neg': 0.0,
        }
        mask = targets['mask']
        if targets is not None:
            # Supervise every hourglass stack output on the HAFM targets.
            for nstack, output in enumerate(outputs):
                loss_map = torch.mean(F.l1_loss(output[:,:3].sigmoid(), targets['md'],reduction='none'),dim=1,keepdim=True)
                loss_dict['loss_md'] += torch.mean(loss_map*mask) / torch.mean(mask)
                loss_map = F.l1_loss(output[:,3:4].sigmoid(), targets['dis'], reduction='none')
                loss_dict['loss_dis'] += torch.mean(loss_map*mask) /torch.mean(mask)
                # The residual branch regresses the distance-prediction error.
                loss_residual_map = F.l1_loss(output[:,4:5].sigmoid(), loss_map, reduction='none')
                loss_dict['loss_res'] += torch.mean(loss_residual_map*mask)/torch.mean(mask)
                loss_dict['loss_jloc'] += cross_entropy_loss_for_junction(output[:,5:7], targets['jloc'])
                loss_dict['loss_joff'] += sigmoid_l1_loss(output[:,7:9], targets['joff'], -0.5, targets['jloc'])
        loi_features = self.fc1(features)
        output = outputs[0]
        md_pred = output[:,:3].sigmoid()
        dis_pred = output[:,3:4].sigmoid()
        res_pred = output[:,4:5].sigmoid()
        jloc_pred= output[:,5:7].softmax(1)[:,1:]
        joff_pred= output[:,7:9].sigmoid() - 0.5
        lines_batch = []
        extra_info = {
        }
        batch_size = md_pred.size(0)
        for i, (md_pred_per_im, dis_pred_per_im,res_pred_per_im,meta) in enumerate(zip(md_pred, dis_pred,res_pred,metas)):
            lines_pred = []
            if self.use_residual:
                # Enumerate proposals at dis - res, dis and dis + res.
                for scale in [-1.0,0.0,1.0]:
                    _ = self.proposal_lines(md_pred_per_im, dis_pred_per_im+scale*res_pred_per_im).view(-1, 4)
                    lines_pred.append(_)
            else:
                lines_pred.append(self.proposal_lines(md_pred_per_im, dis_pred_per_im).view(-1, 4))
            lines_pred = torch.cat(lines_pred)
            junction_gt = meta['junc']
            N = junction_gt.size(0)
            juncs_pred, _ = get_junctions(non_maximum_suppression(jloc_pred[i]),joff_pred[i], topk=min(N*2+2,self.n_dyn_junc))
            # Snap both proposal endpoints to nearest predicted junctions.
            dis_junc_to_end1, idx_junc_to_end1 = torch.sum((lines_pred[:,:2]-juncs_pred[:,None])**2,dim=-1).min(0)
            dis_junc_to_end2, idx_junc_to_end2 = torch.sum((lines_pred[:, 2:] - juncs_pred[:, None]) ** 2, dim=-1).min(0)
            idx_junc_to_end_min = torch.min(idx_junc_to_end1,idx_junc_to_end2)
            idx_junc_to_end_max = torch.max(idx_junc_to_end1,idx_junc_to_end2)
            iskeep = idx_junc_to_end_min<idx_junc_to_end_max
            idx_lines_for_junctions = torch.cat((idx_junc_to_end_min[iskeep,None],idx_junc_to_end_max[iskeep,None]),dim=1).unique(dim=0)
            # Include both endpoint orderings for each junction pair.
            idx_lines_for_junctions_mirror = torch.cat((idx_lines_for_junctions[:,1,None],idx_lines_for_junctions[:,0,None]),dim=1)
            idx_lines_for_junctions = torch.cat((idx_lines_for_junctions, idx_lines_for_junctions_mirror))
            lines_adjusted = torch.cat((juncs_pred[idx_lines_for_junctions[:,0]], juncs_pred[idx_lines_for_junctions[:,1]]),dim=1)
            # Match predicted junctions to ground truth (N = "no match").
            cost_, match_ = torch.sum((juncs_pred-junction_gt[:,None])**2,dim=-1).min(0)
            match_[cost_>1.5*1.5] = N
            Lpos = meta['Lpos']
            Lneg = meta['Lneg']
            labels = Lpos[match_[idx_lines_for_junctions[:,0]],match_[idx_lines_for_junctions[:,1]]]
            # Dynamically subsample positive / negative / random proposals.
            iskeep = torch.zeros_like(labels, dtype= torch.bool)
            cdx = labels.nonzero().flatten()
            if len(cdx) > self.n_dyn_posl:
                perm = torch.randperm(len(cdx),device=device)[:self.n_dyn_posl]
                cdx = cdx[perm]
            iskeep[cdx] = 1
            if self.n_dyn_negl > 0:
                cdx = Lneg[match_[idx_lines_for_junctions[:,0]],match_[idx_lines_for_junctions[:,1]]].nonzero().flatten()
                if len(cdx) > self.n_dyn_negl:
                    perm = torch.randperm(len(cdx), device=device)[:self.n_dyn_negl]
                    cdx = cdx[perm]
                iskeep[cdx] = 1
            if self.n_dyn_othr > 0:
                cdx = torch.randint(len(iskeep), (self.n_dyn_othr,), device=device)
                iskeep[cdx] = 1
            if self.n_dyn_othr2 >0 :
                cdx = (labels==0).nonzero().flatten()
                if len(cdx) > self.n_dyn_othr2:
                    perm = torch.randperm(len(cdx), device=device)[:self.n_dyn_othr2]
                    cdx = cdx[perm]
                iskeep[cdx] = 1
            lines_selected = lines_adjusted[iskeep]
            labels_selected = labels[iskeep]
            # Append the precomputed static line samples from the encoder.
            lines_for_train = torch.cat((lines_selected,meta['lpre']))
            labels_for_train = torch.cat((labels_selected.float(),meta['lpre_label']))
            logits = self.pooling(loi_features[i],lines_for_train)
            loss_ = self.loss(logits, labels_for_train)
            loss_positive = loss_[labels_for_train==1].mean()
            loss_negative = loss_[labels_for_train==0].mean()
            loss_dict['loss_pos'] += loss_positive/batch_size
            loss_dict['loss_neg'] += loss_negative/batch_size
        return loss_dict, extra_info
    def proposal_lines(self, md_maps, dis_maps, scale=5.0):
        """
        :param md_maps: 3xhxw, the range should be (0,1) for every element
        :param dis_maps: 1xhxw
        :return: hxwx4 line proposals decoded from the HAFM parameterization
        """
        device = md_maps.device
        height, width = md_maps.size(1), md_maps.size(2)
        _y = torch.arange(0,height,device=device).float()
        _x = torch.arange(0,width, device=device).float()
        y0,x0 = torch.meshgrid(_y,_x)
        # Decode the mid-direction and the two half-plane angles.
        md_ = (md_maps[0]-0.5)*np.pi*2
        st_ = md_maps[1]*np.pi/2
        ed_ = -md_maps[2]*np.pi/2
        cs_md = torch.cos(md_)
        ss_md = torch.sin(md_)
        cs_st = torch.cos(st_).clamp(min=1e-3)
        ss_st = torch.sin(st_).clamp(min=1e-3)
        cs_ed = torch.cos(ed_).clamp(min=1e-3)
        ss_ed = torch.sin(ed_).clamp(max=-1e-3)
        x_standard = torch.ones_like(cs_st)
        y_st = ss_st/cs_st
        y_ed = ss_ed/cs_ed
        # Rotate the canonical endpoints by md and scale by the distance map.
        x_st_rotated = (cs_md - ss_md*y_st)*dis_maps[0]*scale
        y_st_rotated = (ss_md + cs_md*y_st)*dis_maps[0]*scale
        x_ed_rotated = (cs_md - ss_md*y_ed)*dis_maps[0]*scale
        y_ed_rotated = (ss_md + cs_md*y_ed)*dis_maps[0]*scale
        x_st_final = (x_st_rotated + x0).clamp(min=0,max=width-1)
        y_st_final = (y_st_rotated + y0).clamp(min=0,max=height-1)
        x_ed_final = (x_ed_rotated + x0).clamp(min=0,max=width-1)
        y_ed_final = (y_ed_rotated + y0).clamp(min=0,max=height-1)
        lines = torch.stack((x_st_final,y_st_final,x_ed_final,y_ed_final)).permute((1,2,0))
        return lines#, normals
    def proposal_lines_new(self, md_maps, dis_maps, residual_maps, scale=5.0):
        """
        :param md_maps: 3xhxw, the range should be (0,1) for every element
        :param dis_maps: 1xhxw
        :return: Kxhxwx4 proposals; K=3 when residual_maps is given
                 (distances dis-res, dis, dis+res), otherwise K=1
        """
        device = md_maps.device
        sign_pad = torch.tensor([-1,0,1],device=device,dtype=torch.float32).reshape(3,1,1)
        if residual_maps is None:
            dis_maps_new = dis_maps.repeat((1,1,1))
        else:
            dis_maps_new = dis_maps.repeat((3,1,1))+sign_pad*residual_maps.repeat((3,1,1))
        height, width = md_maps.size(1), md_maps.size(2)
        _y = torch.arange(0,height,device=device).float()
        _x = torch.arange(0,width, device=device).float()
        y0,x0 = torch.meshgrid(_y,_x)
        md_ = (md_maps[0]-0.5)*np.pi*2
        st_ = md_maps[1]*np.pi/2
        ed_ = -md_maps[2]*np.pi/2
        cs_md = torch.cos(md_)
        ss_md = torch.sin(md_)
        cs_st = torch.cos(st_).clamp(min=1e-3)
        ss_st = torch.sin(st_).clamp(min=1e-3)
        cs_ed = torch.cos(ed_).clamp(min=1e-3)
        ss_ed = torch.sin(ed_).clamp(max=-1e-3)
        y_st = ss_st/cs_st
        y_ed = ss_ed/cs_ed
        x_st_rotated = (cs_md-ss_md*y_st)[None]*dis_maps_new*scale
        y_st_rotated = (ss_md + cs_md*y_st)[None]*dis_maps_new*scale
        x_ed_rotated = (cs_md - ss_md*y_ed)[None]*dis_maps_new*scale
        y_ed_rotated = (ss_md + cs_md*y_ed)[None]*dis_maps_new*scale
        x_st_final = (x_st_rotated + x0[None]).clamp(min=0,max=width-1)
        y_st_final = (y_st_rotated + y0[None]).clamp(min=0,max=height-1)
        x_ed_final = (x_ed_rotated + x0[None]).clamp(min=0,max=width-1)
        y_ed_final = (y_ed_rotated + y0[None]).clamp(min=0,max=height-1)
        lines = torch.stack((x_st_final,y_st_final,x_ed_final,y_ed_final)).permute((1,2,3,0))
        # normals = torch.stack((cs_md,ss_md)).permute((1,2,0))
        return lines#, normals
def get_hawp_model(pretrained = False):
    """Build a WireframeDetector; optionally load the released weights.

    :param pretrained: when True, download the checkpoint described by
        ``PRETRAINED`` into the torch hub cache, load it, and switch the
        model to eval mode
    :return: the (possibly pretrained) WireframeDetector instance
    """
    from parsing.config import cfg
    import os
    model = WireframeDetector(cfg)
    if not pretrained:
        return model
    url = PRETRAINED.get('url')
    cache_path = os.path.join(torch.hub.get_dir(), os.path.basename(url))
    weights = torch.hub.load_state_dict_from_url(url, cache_path)
    model.load_state_dict(weights)
    return model.eval()
| 17,610 | 39.025 | 293 | py |
hawp | hawp-master/parsing/solver.py | import torch
def make_optimizer(cfg, model):
    """Create the SGD or Adam optimizer described by ``cfg.SOLVER``.

    Every trainable parameter is placed in its own parameter group so that
    per-parameter learning rates/decay can be tuned later if needed.

    :raises NotImplementedError: for an unknown ``cfg.SOLVER.OPTIMIZER``
    """
    groups = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        groups.append({
            "params": [param],
            "lr": cfg.SOLVER.BASE_LR,
            "weight_decay": cfg.SOLVER.WEIGHT_DECAY,
        })
    choice = cfg.SOLVER.OPTIMIZER
    if choice == 'SGD':
        return torch.optim.SGD(groups,
                               cfg.SOLVER.BASE_LR,
                               momentum=cfg.SOLVER.MOMENTUM,
                               weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    if choice == 'ADAM':
        return torch.optim.Adam(groups, cfg.SOLVER.BASE_LR,
                                weight_decay=cfg.SOLVER.WEIGHT_DECAY,
                                amsgrad=cfg.SOLVER.AMSGRAD)
    raise NotImplementedError()
def make_lr_scheduler(cfg,optimizer):
    """Return a MultiStepLR decaying the LR by ``GAMMA`` at each milestone."""
    milestones = cfg.SOLVER.STEPS
    decay = cfg.SOLVER.GAMMA
    return torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                milestones=milestones,
                                                gamma=decay)
hawp | hawp-master/parsing/dataset/test_dataset.py | import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
import json
import copy
from PIL import Image
from skimage import io
import os
import os.path as osp
import numpy as np
class TestDatasetWithAnnotations(Dataset):
    '''Evaluation dataset yielding an image together with its annotation.

    Each entry of the annotation file is a dict with:
    - filename # of the input image, str
    - height   # of the input image, int
    - width    # of the input image, int
    - lines    # of the input image, list of list, N*4
    - junc     # of the input image, list of list, M*2
    '''
    def __init__(self, root, ann_file, transform = None):
        self.root = root
        with open(ann_file, 'r') as handle:
            self.annotations = json.load(handle)
        self.transform = transform
    def __len__(self):
        return len(self.annotations)
    def __getitem__(self, idx):
        ann = copy.deepcopy(self.annotations[idx])
        # Read as float RGB, dropping any alpha channel.
        image = io.imread(osp.join(self.root,ann['filename'])).astype(float)[:,:,:3]
        ann['junc'] = np.array(ann['junc'], dtype=np.float32)
        ann['lines'] = np.array(ann['lines'], dtype=np.float32)
        if self.transform is None:
            return image, ann
        return self.transform(image, ann)
    def image(self, idx):
        # PIL variant used for visualization.
        ann = copy.deepcopy(self.annotations[idx])
        return Image.open(osp.join(self.root,ann['filename'])).convert('RGB')
    @staticmethod
    def collate_fn(batch):
        images = default_collate([b[0] for b in batch])
        anns = [b[1] for b in batch]
        return images, anns
| 1,711 | 33.24 | 84 | py |
hawp | hawp-master/parsing/dataset/train_dataset.py | import torch
from torch.utils.data import Dataset
import os.path as osp
import json
import cv2
from skimage import io
from PIL import Image
import numpy as np
import random
from torch.utils.data.dataloader import default_collate
from torch.utils.data.dataloader import DataLoader
import matplotlib.pyplot as plt
from torchvision.transforms import functional as F
import copy
class TrainDataset(Dataset):
    """Training dataset with 4-way flip augmentation.

    Every annotation is served four times (``len = 4 * #annotations``); the
    index quotient selects the augmentation: 0 = identity, 1 = horizontal
    flip, 2 = vertical flip, 3 = both flips.
    """
    def __init__(self, root, ann_file, transform = None):
        """
        :param root: directory containing the images
        :param ann_file: JSON list of annotation dicts (filename, width,
            height, junctions, edges_positive, edges_negative)
        :param transform: optional callable applied to (image, ann)
        """
        self.root = root
        with open(ann_file,'r') as fp:
            self.annotations = json.load(fp)
        self.transform = transform
    def __getitem__(self, idx_):
        idx = idx_%len(self.annotations)
        reminder = idx_//len(self.annotations)
        ann = copy.deepcopy(self.annotations[idx])
        ann['reminder'] = reminder
        image = io.imread(osp.join(self.root,ann['filename'])).astype(float)[:,:,:3]
        # FIX: np.long was removed in NumPy 1.24; np.int64 is the concrete
        # dtype needed for the edge index arrays (used for indexing).
        for key,_type in (['junctions',np.float32],
                          ['edges_positive',np.int64],
                          ['edges_negative',np.int64]):
            ann[key] = np.array(ann[key],dtype=_type)
        width = ann['width']
        height = ann['height']
        # Flip the image (negative strides) and mirror the junction
        # coordinates accordingly.
        if reminder == 1:
            image = image[:,::-1,:]
            ann['junctions'][:,0] = width-ann['junctions'][:,0]
        elif reminder == 2:
            image = image[::-1,:,:]
            ann['junctions'][:,1] = height-ann['junctions'][:,1]
        elif reminder == 3:
            image = image[::-1,::-1,:]
            ann['junctions'][:,0] = width-ann['junctions'][:,0]
            ann['junctions'][:,1] = height-ann['junctions'][:,1]
        else:
            pass
        if self.transform is not None:
            return self.transform(image,ann)
        return image, ann
    def __len__(self):
        # Four augmented copies per annotated image.
        return len(self.annotations)*4
def collate_fn(batch):
    """Collate (image, ann) pairs: stack the images, keep anns as a list."""
    images, anns = zip(*batch)
    return default_collate(list(images)), list(anns)
hawp | hawp-master/parsing/dataset/build.py | import torch
from .transforms import *
from . import train_dataset
from parsing.config.paths_catalog import DatasetCatalog
from . import test_dataset
def build_transform(cfg):
    """Inference-time image pipeline: resize, tensorize, normalize."""
    steps = [
        ResizeImage(cfg.DATASETS.IMAGE.HEIGHT, cfg.DATASETS.IMAGE.WIDTH),
        ToTensor(),
        Normalize(cfg.DATASETS.IMAGE.PIXEL_MEAN,
                  cfg.DATASETS.IMAGE.PIXEL_STD,
                  cfg.DATASETS.IMAGE.TO_255),
    ]
    return Compose(steps)
def build_train_dataset(cfg):
    """Build the (single) training set and wrap it in a shuffling DataLoader."""
    assert len(cfg.DATASETS.TRAIN) == 1
    entry = DatasetCatalog.get(cfg.DATASETS.TRAIN[0])
    factory = getattr(train_dataset, entry['factory'])
    args = entry['args']
    # Joint image/annotation transform: images and targets may use
    # different output resolutions.
    args['transform'] = Compose([
        Resize(cfg.DATASETS.IMAGE.HEIGHT,
               cfg.DATASETS.IMAGE.WIDTH,
               cfg.DATASETS.TARGET.HEIGHT,
               cfg.DATASETS.TARGET.WIDTH),
        ToTensor(),
        Normalize(cfg.DATASETS.IMAGE.PIXEL_MEAN,
                  cfg.DATASETS.IMAGE.PIXEL_STD,
                  cfg.DATASETS.IMAGE.TO_255),
    ])
    loader = torch.utils.data.DataLoader(
        factory(**args),
        batch_size=cfg.SOLVER.IMS_PER_BATCH,
        collate_fn=train_dataset.collate_fn,
        shuffle=True,
        num_workers=cfg.DATALOADER.NUM_WORKERS)
    return loader
def build_test_dataset(cfg):
    """Build one DataLoader per test split; returns ``[(name, loader), ...]``."""
    transforms = Compose([
        ResizeImage(cfg.DATASETS.IMAGE.HEIGHT, cfg.DATASETS.IMAGE.WIDTH),
        ToTensor(),
        Normalize(cfg.DATASETS.IMAGE.PIXEL_MEAN,
                  cfg.DATASETS.IMAGE.PIXEL_STD,
                  cfg.DATASETS.IMAGE.TO_255),
    ])
    loaders = []
    for name in cfg.DATASETS.TEST:
        entry = DatasetCatalog.get(name)
        factory = getattr(test_dataset, entry['factory'])
        args = entry['args']
        args['transform'] = transforms
        ds = factory(**args)
        loader = torch.utils.data.DataLoader(
            ds, batch_size=1,
            collate_fn=ds.collate_fn,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
        )
        loaders.append((name, loader))
    return loaders
hawp | hawp-master/parsing/dataset/transforms.py | import torch
import torchvision
from torchvision.transforms import functional as F
from skimage.transform import resize
import numpy as np
class Compose(object):
    """Chain transforms operating on an image or on an (image, ann) pair."""
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, image, ann=None):
        if ann is None:
            # Image-only pipeline (inference path).
            for step in self.transforms:
                image = step(image)
            return image
        for step in self.transforms:
            image, ann = step(image, ann)
        return image, ann
class Resize(object):
    """Resize the image and rescale junction coordinates to the target grid."""
    def __init__(self, image_height, image_width, ann_height, ann_width):
        self.image_height = image_height
        self.image_width = image_width
        self.ann_height = ann_height
        self.ann_width = ann_width
    def __call__(self, image, ann):
        image = resize(image,(self.image_height,self.image_width))
        # NOTE(review): skimage's resize already returns floats in [0,1];
        # the extra /255.0 matches this codebase's convention — confirm.
        image = np.array(image,dtype=np.float32)/255.0
        scale_x = self.ann_width/ann['width']
        scale_y = self.ann_height/ann['height']
        # Clip just below the bound so integer binning stays in range.
        ann['junctions'][:,0] = np.clip(ann['junctions'][:,0]*scale_x, 0, self.ann_width-1e-4)
        ann['junctions'][:,1] = np.clip(ann['junctions'][:,1]*scale_y, 0, self.ann_height-1e-4)
        ann['width'] = self.ann_width
        ann['height'] = self.ann_height
        return image, ann
class ResizeImage(object):
    """Resize only the image, passing any annotation through untouched."""
    def __init__(self, image_height, image_width):
        self.image_height = image_height
        self.image_width = image_width
    def __call__(self, image, ann=None):
        resized = resize(image,(self.image_height,self.image_width))
        resized = np.array(resized,dtype=np.float32)/255.0
        if ann is None:
            return resized
        return resized, ann
class ToTensor(object):
    """Convert the image to a CHW tensor; tensorize numpy fields of ann."""
    def __call__(self, image, anns=None):
        tensor = F.to_tensor(image)
        if anns is None:
            return tensor
        for key, value in anns.items():
            if isinstance(value, np.ndarray):
                anns[key] = torch.from_numpy(value)
        return tensor, anns
class Normalize(object):
    """Channel-wise normalization, optionally scaling [0,1] input to [0,255]."""
    def __init__(self, mean, std,to_255 = False):
        self.mean = mean
        self.std = std
        self.to_255 = to_255
    def __call__(self, image, anns = None):
        if self.to_255:
            # In-place rescale before applying mean/std.
            image *= 255.0
        image = F.normalize(image,mean=self.mean,std=self.std)
        if anns is None:
            return image
        return image, anns
| 2,395 | 32.277778 | 90 | py |
hawp | hawp-master/parsing/encoder/hafm.py | import torch
import numpy as np
from torch.utils.data.dataloader import default_collate
from parsing import _C
class HAFMencoder(object):
    """Holistic Attraction Field Map (HAFM) encoder.

    Turns per-image line-segment annotations into the dense training
    targets of the detector (junction heatmap/offsets plus the angle and
    distance attraction fields) and into per-image metadata used by the
    line sampling/verification stage.
    """
    def __init__(self, cfg):
        # Distance truncation and angular validity thresholds of the field.
        self.dis_th = cfg.ENCODER.DIS_TH
        self.ang_th = cfg.ENCODER.ANG_TH
        # Number of statically sampled positive/negative lines per image.
        self.num_static_pos_lines = cfg.ENCODER.NUM_STATIC_POS_LINES
        self.num_static_neg_lines = cfg.ENCODER.NUM_STATIC_NEG_LINES
    def __call__(self,annotations):
        """Encode a batch; returns (collated target dicts, list of metas)."""
        targets = []
        metas = []
        for ann in annotations:
            t,m = self._process_per_image(ann)
            targets.append(t)
            metas.append(m)
        return default_collate(targets),metas
    def adjacent_matrix(self, n, edges, device):
        """Symmetric boolean (n+1)x(n+1) adjacency matrix of junction pairs."""
        mat = torch.zeros(n+1,n+1,dtype=torch.bool,device=device)
        if edges.size(0)>0:
            mat[edges[:,0], edges[:,1]] = 1
            mat[edges[:,1], edges[:,0]] = 1
        return mat
    def _process_per_image(self,ann):
        """Encode one annotation dict into (target, meta).

        target keys: 'jloc' junction heatmap, 'joff' sub-pixel offsets,
        'md' 3-channel normalized angle field, 'dis' normalized distance
        field, 'mask' validity mask.
        """
        junctions = ann['junctions']
        device = junctions.device
        height, width = ann['height'], ann['width']
        jmap = torch.zeros((height,width),device=device)
        joff = torch.zeros((2,height,width),device=device,dtype=torch.float32)
        # junctions[:,0] = junctions[:,0].clamp(min=0,max=width-1)
        # junctions[:,1] = junctions[:,1].clamp(min=0,max=height-1)
        # Junction heatmap plus offsets relative to the pixel center (+0.5).
        xint,yint = junctions[:,0].long(), junctions[:,1].long()
        off_x = junctions[:,0] - xint.float()-0.5
        off_y = junctions[:,1] - yint.float()-0.5
        jmap[yint,xint] = 1
        joff[0,yint,xint] = off_x
        joff[1,yint,xint] = off_y
        edges_positive = ann['edges_positive']
        edges_negative = ann['edges_negative']
        pos_mat = self.adjacent_matrix(junctions.size(0),edges_positive,device)
        neg_mat = self.adjacent_matrix(junctions.size(0),edges_negative,device)
        # Endpoint coordinates (x1,y1,x2,y2); negatives are capped at the
        # first 2000 edges before sampling.
        lines = torch.cat((junctions[edges_positive[:,0]], junctions[edges_positive[:,1]]),dim=-1)
        lines_neg = torch.cat((junctions[edges_negative[:2000,0]],junctions[edges_negative[:2000,1]]),dim=-1)
        # Compiled extension rasterizes the attraction field of the lines.
        lmap, _, _ = _C.encodels(lines,height,width,height,width,lines.size(0))
        # Randomly subsample static positive/negative lines for training.
        lpos = np.random.permutation(lines.cpu().numpy())[:self.num_static_pos_lines]
        lneg = np.random.permutation(lines_neg.cpu().numpy())[:self.num_static_neg_lines]
        # lpos = lines[torch.randperm(lines.size(0),device=device)][:self.num_static_pos_lines]
        # lneg = lines_neg[torch.randperm(lines_neg.size(0),device=device)][:self.num_static_neg_lines]
        lpos = torch.from_numpy(lpos).to(device)
        lneg = torch.from_numpy(lneg).to(device)
        lpre = torch.cat((lpos,lneg),dim=0)
        # Randomly swap endpoint order so orientation carries no label signal.
        _swap = (torch.rand(lpre.size(0))>0.5).to(device)
        lpre[_swap] = lpre[_swap][:,[2,3,0,1]]
        lpre_label = torch.cat(
            [
                torch.ones(lpos.size(0),device=device),
                torch.zeros(lneg.size(0),device=device)
            ])
        meta = {
            'junc': junctions,
            'Lpos': pos_mat,
            'Lneg': neg_mat,
            'lpre': lpre,
            'lpre_label': lpre_label,
            'lines': lines,
        }
        # Per-pixel distance to the closest point of its attracting line.
        dismap = torch.sqrt(lmap[0]**2+lmap[1]**2)[None]
        def _normalize(inp):
            # Normalize a 2-channel vector field to unit length.
            mag = torch.sqrt(inp[0]*inp[0]+inp[1]*inp[1])
            return inp/(mag+1e-6)
        # Unit vectors: mid-point direction and the two endpoint directions.
        md_map = _normalize(lmap[:2])
        st_map = _normalize(lmap[2:4])
        ed_map = _normalize(lmap[4:])
        md_ = md_map.reshape(2,-1).t()
        st_ = st_map.reshape(2,-1).t()
        ed_ = ed_map.reshape(2,-1).t()
        # Per-pixel 2x2 rotation matrices built from md_ (Rt = R transposed).
        Rt = torch.cat(
            (torch.cat((md_[:,None,None,0],md_[:,None,None,1]),dim=2),
            torch.cat((-md_[:,None,None,1], md_[:,None,None,0]),dim=2)),dim=1)
        R = torch.cat(
            (torch.cat((md_[:,None,None,0], -md_[:,None,None,1]),dim=2),
            torch.cat((md_[:,None,None,1], md_[:,None,None,0]),dim=2)),dim=1)
        # Endpoint directions expressed in the mid-direction local frame.
        Rtst_ = torch.matmul(Rt, st_[:,:,None]).squeeze(-1).t()
        Rted_ = torch.matmul(Rt, ed_[:,:,None]).squeeze(-1).t()
        # Ensure pos_ is the endpoint on the positive-y side of the frame.
        swap_mask = (Rtst_[1]<0)*(Rted_[1]>0)
        pos_ = Rtst_.clone()
        neg_ = Rted_.clone()
        temp = pos_[:,swap_mask]
        pos_[:,swap_mask] = neg_[:,swap_mask]
        neg_[:,swap_mask] = temp
        # Keep components strictly inside their valid quadrants.
        pos_[0] = pos_[0].clamp(min=1e-9)
        pos_[1] = pos_[1].clamp(min=1e-9)
        neg_[0] = neg_[0].clamp(min=1e-9)
        neg_[1] = neg_[1].clamp(max=-1e-9)
        # A pixel is valid only when both angle components pass ang_th and
        # the distance is within dis_th.
        mask = ((pos_[1]>self.ang_th)*(neg_[1]<-self.ang_th)*(dismap.view(-1)<=self.dis_th)).float()
        pos_map = pos_.reshape(-1,height,width)
        neg_map = neg_.reshape(-1,height,width)
        md_angle = torch.atan2(md_map[1], md_map[0])
        pos_angle = torch.atan2(pos_map[1],pos_map[0])
        neg_angle = torch.atan2(neg_map[1],neg_map[0])
        # Normalize: endpoint angles over pi/2, mid angle over 2*pi into (0,1).
        pos_angle_n = pos_angle/(np.pi/2)
        neg_angle_n = -neg_angle/(np.pi/2)
        md_angle_n = md_angle/(np.pi*2) + 0.5
        mask = mask.reshape(height,width)
        hafm_ang = torch.cat((md_angle_n[None],pos_angle_n[None],neg_angle_n[None],),dim=0)
        hafm_dis = dismap.clamp(max=self.dis_th)/self.dis_th
        mask = mask[None]
        target = {'jloc':jmap[None],
                  'joff':joff,
                  'md': hafm_ang,
                  'dis': hafm_dis,
                  'mask': mask
                  }
        return target, meta
hawp | hawp-master/parsing/utils/c2_model_loading.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import pickle
from collections import OrderedDict
import torch
from parsing.utils.model_serialization import load_state_dict
from parsing.utils.registry import Registry
def _rename_basic_resnet_weights(layer_keys):
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [k.replace(".w", ".weight") for k in layer_keys]
layer_keys = [k.replace(".bn", "_bn") for k in layer_keys]
layer_keys = [k.replace(".b", ".bias") for k in layer_keys]
layer_keys = [k.replace("_bn.s", "_bn.scale") for k in layer_keys]
layer_keys = [k.replace(".biasranch", ".branch") for k in layer_keys]
layer_keys = [k.replace("bbox.pred", "bbox_pred") for k in layer_keys]
layer_keys = [k.replace("cls.score", "cls_score") for k in layer_keys]
layer_keys = [k.replace("res.conv1_", "conv1_") for k in layer_keys]
# RPN / Faster RCNN
layer_keys = [k.replace(".biasbox", ".bbox") for k in layer_keys]
layer_keys = [k.replace("conv.rpn", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox.pred", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [k.replace("rpn.cls.logits", "rpn.cls_logits") for k in layer_keys]
# Affine-Channel -> BatchNorm enaming
layer_keys = [k.replace("_bn.scale", "_bn.weight") for k in layer_keys]
# Make torchvision-compatible
layer_keys = [k.replace("conv1_bn.", "bn1.") for k in layer_keys]
layer_keys = [k.replace("res2.", "layer1.") for k in layer_keys]
layer_keys = [k.replace("res3.", "layer2.") for k in layer_keys]
layer_keys = [k.replace("res4.", "layer3.") for k in layer_keys]
layer_keys = [k.replace("res5.", "layer4.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2a_bn.", ".bn1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2b_bn.", ".bn2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
layer_keys = [k.replace(".branch2c_bn.", ".bn3.") for k in layer_keys]
layer_keys = [k.replace(".branch1.", ".downsample.0.") for k in layer_keys]
layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys]
# GroupNorm
layer_keys = [k.replace("conv1.gn.s", "bn1.weight") for k in layer_keys]
layer_keys = [k.replace("conv1.gn.bias", "bn1.bias") for k in layer_keys]
layer_keys = [k.replace("conv2.gn.s", "bn2.weight") for k in layer_keys]
layer_keys = [k.replace("conv2.gn.bias", "bn2.bias") for k in layer_keys]
layer_keys = [k.replace("conv3.gn.s", "bn3.weight") for k in layer_keys]
layer_keys = [k.replace("conv3.gn.bias", "bn3.bias") for k in layer_keys]
layer_keys = [k.replace("downsample.0.gn.s", "downsample.1.weight") \
for k in layer_keys]
layer_keys = [k.replace("downsample.0.gn.bias", "downsample.1.bias") \
for k in layer_keys]
return layer_keys
def _rename_fpn_weights(layer_keys, stage_names):
for mapped_idx, stage_name in enumerate(stage_names, 1):
suffix = ""
if mapped_idx < 4:
suffix = ".lateral"
layer_keys = [
k.replace("fpn.inner.layer{}.sum{}".format(stage_name, suffix), "fpn_inner{}".format(mapped_idx)) for k in layer_keys
]
layer_keys = [k.replace("fpn.layer{}.sum".format(stage_name), "fpn_layer{}".format(mapped_idx)) for k in layer_keys]
layer_keys = [k.replace("rpn.conv.fpn2", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox_pred.fpn2", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [
k.replace("rpn.cls_logits.fpn2", "rpn.cls_logits") for k in layer_keys
]
return layer_keys
def _rename_weights_for_resnet(weights, stage_names):
    """Convert a Caffe2 ResNet blob dict into a PyTorch-style OrderedDict.

    Momentum buffers are dropped; every remaining blob is renamed and
    converted from numpy to a torch tensor.
    """
    original_keys = sorted(weights.keys())
    renamed = sorted(weights.keys())
    # for X-101, rename output to fc1000 to avoid conflicts afterwards
    renamed = ["fc1000_b" if k == "pred_b" else k for k in renamed]
    renamed = ["fc1000_w" if k == "pred_w" else k for k in renamed]
    # basic renaming (_ -> . etc.), then FPN-specific renames
    renamed = _rename_basic_resnet_weights(renamed)
    renamed = _rename_fpn_weights(renamed, stage_names)
    # Mask R-CNN
    renamed = [k.replace("mask.fcn.logits", "mask_fcn_logits") for k in renamed]
    renamed = [k.replace(".[mask].fcn", "mask_fcn") for k in renamed]
    renamed = [k.replace("conv5.mask", "conv5_mask") for k in renamed]
    # Keypoint R-CNN
    renamed = [k.replace("kps.score.lowres", "kps_score_lowres") for k in renamed]
    renamed = [k.replace("kps.score", "kps_score") for k in renamed]
    renamed = [k.replace("conv.fcn", "conv_fcn") for k in renamed]
    # Our RPN structure nests the head one level deeper.
    renamed = [k.replace("rpn.", "rpn.head.") for k in renamed]
    key_map = dict(zip(original_keys, renamed))
    logger = logging.getLogger(__name__)
    logger.info("Remapping C2 weights")
    max_c2_key_size = max([len(k) for k in original_keys if "_momentum" not in k])
    new_weights = OrderedDict()
    for old_key in original_keys:
        if "_momentum" in old_key:
            continue
        logger.info(
            "C2 name: {: <{}} mapped name: {}".format(old_key, max_c2_key_size, key_map[old_key])
        )
        new_weights[key_map[old_key]] = torch.from_numpy(weights[old_key])
    return new_weights
def _load_c2_pickled_weights(file_path):
with open(file_path, "rb") as f:
if torch._six.PY3:
data = pickle.load(f, encoding="latin1")
else:
data = pickle.load(f)
if "blobs" in data:
weights = data["blobs"]
else:
weights = data
return weights
#TODO: DELETE THE DCN parts
# def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
# import re
# logger = logging.getLogger(__name__)
# logger.info("Remapping conv weights for deformable conv weights")
# layer_keys = sorted(state_dict.keys())
# for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
# if not stage_with_dcn:
# continue
# for old_key in layer_keys:
# pattern = ".*layer{}.*conv2.*".format(ix)
# r = re.match(pattern, old_key)
# if r is None:
# continue
# for param in ["weight", "bias"]:
# if old_key.find(param) is -1:
# continue
# new_key = old_key.replace(
# "conv2.{}".format(param), "conv2.conv.{}".format(param)
# )
# logger.info("pattern: {}, old_key: {}, new_key: {}".format(
# pattern, old_key, new_key
# ))
# state_dict[new_key] = state_dict[old_key]
# del state_dict[old_key]
# return state_dict
# Caffe2 stage identifiers ("<stage>.<last block index>") for each ResNet
# depth; consumed by _rename_fpn_weights via load_resnet_c2_format.
_C2_STAGE_NAMES = {
    "R-50": ["1.2", "2.3", "3.5", "4.2"],
    "R-101": ["1.2", "2.3", "3.22", "4.2"],
    "R-152": ["1.2", "2.7", "3.35", "4.2"],
}
# Registry mapping a MODEL.BACKBONE.CONV_BODY string to the loader that
# understands its Caffe2 checkpoint layout.
C2_FORMAT_LOADER = Registry()
@C2_FORMAT_LOADER.register("R-50-FPN-RETINANET-DEC")
@C2_FORMAT_LOADER.register("R-50-DEC")
@C2_FORMAT_LOADER.register("R-50-FPN-DEC")
@C2_FORMAT_LOADER.register("R-50-C4")
@C2_FORMAT_LOADER.register("R-50-C5")
@C2_FORMAT_LOADER.register("R-101-C4")
@C2_FORMAT_LOADER.register("R-101-C5")
@C2_FORMAT_LOADER.register("R-50-FPN")
@C2_FORMAT_LOADER.register("R-50-FPN-RETINANET")
@C2_FORMAT_LOADER.register("R-101-FPN")
@C2_FORMAT_LOADER.register("R-101-FPN-RETINANET")
@C2_FORMAT_LOADER.register("R-152-FPN")
def load_resnet_c2_format(cfg, f):
    """Load and rename a Caffe2 ResNet checkpoint for the configured body."""
    state_dict = _load_c2_pickled_weights(f)
    # Strip head/neck suffixes to recover the bare depth key
    # (e.g. "R-50-FPN-RETINANET" -> "R-50") used for the stage lookup.
    arch = cfg.MODEL.BACKBONE.CONV_BODY
    for suffix in ("-C4", "-C5", "-FPN", "-DEC", "-RETINANET"):
        arch = arch.replace(suffix, "")
    stages = _C2_STAGE_NAMES[arch]
    state_dict = _rename_weights_for_resnet(state_dict, stages)
    return dict(model=state_dict)
def load_c2_format(cfg, f):
    """Dispatch to the loader registered for ``cfg.MODEL.BACKBONE.CONV_BODY``."""
    loader = C2_FORMAT_LOADER[cfg.MODEL.BACKBONE.CONV_BODY]
    return loader(cfg, f)
| 8,543 | 39.880383 | 129 | py |
hawp | hawp-master/parsing/utils/metric_logger.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import defaultdict
from collections import deque
import torch
class SmoothedValue(object):
    """Track a series of values and expose smoothed statistics over a sliding
    window as well as the global series average.
    """

    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)
        self.series = []
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record one new scalar observation."""
        self.deque.append(value)
        self.series.append(value)
        self.count = self.count + 1
        self.total = self.total + value

    def _window(self):
        # Values currently inside the sliding window, as a tensor.
        return torch.tensor(list(self.deque))

    @property
    def median(self):
        return self._window().median().item()

    @property
    def avg(self):
        return self._window().mean().item()

    @property
    def global_avg(self):
        return self.total / self.count
class MetricLogger(object):
    """Collect named SmoothedValue meters and render them for logging."""
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed one scalar per keyword argument into the matching meter."""
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Expose meters as attributes; fall back to the instance dict.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        entries = []
        for name in sorted(self.meters):
            meter = self.meters[name]
            entries.append(
                "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
            )
        return self.delimiter.join(entries)

    def tensorborad(self, iteration, writter, phase='train'):
        """Write the global average of every 'loss' meter to TensorBoard."""
        for name, meter in self.meters.items():
            if 'loss' not in name:
                continue
            writter.add_scalar('{}/global/{}'.format(phase,name), meter.global_avg, iteration)
| 2,379 | 29.512821 | 98 | py |
hawp | hawp-master/parsing/utils/checkpoint.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import torch
from parsing.utils.model_serialization import load_state_dict
from parsing.utils.c2_model_loading import load_c2_format
from parsing.utils.imports import import_file
from parsing.utils.model_zoo import cache_url
class Checkpointer(object):
    """Save/restore model, optimizer and scheduler state under ``save_dir``.

    A plain-text ``last_checkpoint`` file inside ``save_dir`` records the
    path of the most recent checkpoint so training can resume from it.
    """

    def __init__(
        self,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.save_dir = save_dir
        self.save_to_disk = save_to_disk
        self.logger = logger if logger is not None else logging.getLogger(__name__)

    def save(self, name, **kwargs):
        """Serialize all tracked state (plus ``kwargs``) to ``<name>.pth``."""
        if not self.save_dir or not self.save_to_disk:
            return
        data = {"model": self.model.state_dict()}
        if self.optimizer is not None:
            data["optimizer"] = self.optimizer.state_dict()
        if self.scheduler is not None:
            data["scheduler"] = self.scheduler.state_dict()
        data.update(kwargs)
        save_file = os.path.join(self.save_dir, "{}.pth".format(name))
        self.logger.info("Saving checkpoint to {}".format(save_file))
        torch.save(data, save_file)
        self.tag_last_checkpoint(save_file)

    def load(self, f=None, use_latest=True):
        """Restore state from ``f`` (or from the latest tagged checkpoint).

        :return: any checkpoint entries not consumed here (e.g. iteration)
        """
        if self.has_checkpoint() and use_latest:
            # An existing tagged checkpoint overrides the argument.
            f = self.get_checkpoint_file()
        if not f:
            self.logger.info("No checkpoint found. Initializing model from scratch")
            return {}
        self.logger.info("Loading checkpoint from {}".format(f))
        checkpoint = self._load_file(f)
        self._load_model(checkpoint)
        if "optimizer" in checkpoint and self.optimizer:
            self.logger.info("Loading optimizer from {}".format(f))
            self.optimizer.load_state_dict(checkpoint.pop("optimizer"))
        if "scheduler" in checkpoint and self.scheduler:
            self.logger.info("Loading scheduler from {}".format(f))
            self.scheduler.load_state_dict(checkpoint.pop("scheduler"))
        return checkpoint

    def has_checkpoint(self):
        return os.path.exists(os.path.join(self.save_dir, "last_checkpoint"))

    def get_checkpoint_file(self):
        tag_file = os.path.join(self.save_dir, "last_checkpoint")
        try:
            with open(tag_file, "r") as f:
                return f.read().strip()
        except IOError:
            # The tag file may have just been deleted by a separate process.
            return ""

    def tag_last_checkpoint(self, last_filename):
        tag_file = os.path.join(self.save_dir, "last_checkpoint")
        with open(tag_file, "w") as f:
            f.write(last_filename)

    def _load_file(self, f):
        return torch.load(f, map_location=torch.device("cpu"))

    def _load_model(self, checkpoint):
        load_state_dict(self.model, checkpoint.pop("model"))
class DetectronCheckpointer(Checkpointer):
    """Checkpointer that also resolves catalog://, http and Caffe2 .pkl files."""

    def __init__(
        self,
        cfg,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        super(DetectronCheckpointer, self).__init__(
            model, optimizer, scheduler, save_dir, save_to_disk, logger
        )
        self.cfg = cfg.clone()

    def _load_file(self, f):
        if f.startswith("catalog://"):
            # Resolve a catalog entry to its real location first.
            paths_catalog = import_file(
                "config.paths_catalog", self.cfg.PATHS_CATALOG, True
            )
            resolved = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
            self.logger.info("{} points to {}".format(f, resolved))
            f = resolved
        if f.startswith("http"):
            # Download once; reuse the local cache afterwards.
            cached = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached))
            f = cached
        if f.endswith(".pkl"):
            # Caffe2/Detectron checkpoint needs the renaming loader.
            return load_c2_format(self.cfg, f)
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)
        return loaded
| 4,773 | 33.1 | 84 | py |
hawp | hawp-master/parsing/utils/comm.py | """
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
    """Number of distributed processes, or 1 when not running distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
def get_rank():
    """Rank of this process, or 0 when not running distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    return dist.get_rank()
def is_main_process():
    """True only on rank 0 (the process that should write files/logs)."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        # A single process never needs a barrier.
        return
    dist.barrier()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    # NOTE(review): tensors are moved to "cuda" unconditionally — presumably
    # because the process group backend is NCCL; confirm before using on CPU.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    # Unpickle each rank's payload, trimming the padding bytes first.
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Iterate in sorted-key order so every rank stacks tensors in the
        # same order (required for the collective to be consistent).
        names = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in names], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0 and average:
            # Only rank 0 holds the accumulated sum, so only it divides.
            stacked /= world_size
        reduced_dict = dict(zip(names, stacked))
    return reduced_dict
def to_device(data, device):
    """Recursively move tensors (also inside dicts and lists) to ``device``.

    Fix: previously any unhandled type (int, str, tuple, ...) fell through
    the branches and the function implicitly returned None, silently
    dropping the value (notably for non-tensor list elements). Such objects
    are now returned unchanged.

    Note: dict values are only converted when they are tensors themselves,
    matching the original shallow behavior for nested containers in dicts.
    """
    if isinstance(data, torch.Tensor):
        return data.to(device)
    if isinstance(data, dict):
        for key in data:
            if isinstance(data[key], torch.Tensor):
                data[key] = data[key].to(device)
        return data
    if isinstance(data, list):
        return [to_device(d, device) for d in data]
    # Non-tensor leaves are passed through instead of becoming None.
    return data
hawp | hawp-master/parsing/utils/model_zoo.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import sys
try:
from torch.hub import _download_url_to_file
from torch.hub import urlparse
from torch.hub import HASH_REGEX
except ImportError:
from torch.utils.model_zoo import _download_url_to_file
from torch.utils.model_zoo import urlparse
from torch.utils.model_zoo import HASH_REGEX
from parsing.utils.comm import is_main_process
from parsing.utils.comm import synchronize
# very similar to https://github.com/pytorch/pytorch/blob/master/torch/utils/model_zoo.py
# but with a few improvements and modifications
def cache_url(url, model_dir=None, progress=True):
    r"""Loads the Torch serialized object at the given URL.
    If the object is already present in `model_dir`, it's deserialized and
    returned. The filename part of the URL should follow the naming convention
    ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
    digits of the SHA256 hash of the contents of the file. The hash is used to
    ensure unique names and to verify the contents of the file.
    The default value of `model_dir` is ``$TORCH_HOME/models`` where
    ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
    overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
    Args:
        url (string): URL of the object to download
        model_dir (string, optional): directory in which to save the object
        progress (bool, optional): whether or not to display a progress bar to stderr
    Example:
        >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
    """
    if model_dir is None:
        # Resolve the default cache directory from the usual env overrides.
        torch_home = os.path.expanduser(os.getenv("TORCH_HOME", "~/.torch"))
        model_dir = os.getenv("TORCH_MODEL_ZOO", os.path.join(torch_home, "models"))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if filename == "model_final.pkl":
        # workaround as pre-trained Caffe2 models from Detectron have all the same filename
        # so make the full path the filename by replacing / with _
        filename = parts.path.replace("/", "_")
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file) and is_main_process():
        # Only rank 0 downloads; the other ranks wait at synchronize() below.
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = HASH_REGEX.search(filename)
        if hash_prefix is not None:
            hash_prefix = hash_prefix.group(1)
            # workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
            # which matches the hash PyTorch uses. So we skip the hash matching
            # if the hash_prefix is less than 6 characters
            if len(hash_prefix) < 6:
                hash_prefix = None
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    synchronize()
    return cached_file
| 3,023 | 47.774194 | 135 | py |
hawp | hawp-master/parsing/utils/model_serialization.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
import logging
import torch
from parsing.utils.imports import import_file
def align_and_update_state_dicts(model_state_dict, loaded_state_dict):
    """
    Copy weights from ``loaded_state_dict`` into ``model_state_dict`` in place,
    matching keys by suffix.

    Strategy: suppose that the models that we will create will have prefixes
    appended to each of its keys, for example due to an extra level of nesting
    that the original pre-trained weights from ImageNet won't contain. For
    example, model.state_dict() might return backbone[0].body.res2.conv1.weight,
    while the pre-trained model contains res2.conv1.weight. We thus want to
    match both parameters together. For that, we look for each model weight,
    look among all loaded keys if there is one that is a suffix of the current
    weight name, and use it if that's the case. If multiple matches exist, take
    the one with longest size of the corresponding name. For example, for the
    same model as before, the pretrained weight file can contain both
    res2.conv1.weight, as well as conv1.weight. In this case, we want to match
    backbone[0].body.conv1.weight to conv1.weight, and
    backbone[0].body.res2.conv1.weight to res2.conv1.weight.

    Args:
        model_state_dict (dict): state dict of the model being built; updated
            in place with every matched tensor from ``loaded_state_dict``.
        loaded_state_dict (dict): state dict loaded from a checkpoint.
    """
    current_keys = sorted(model_state_dict.keys())
    loaded_keys = sorted(loaded_state_dict.keys())
    # Matrix of match scores: entry (i, j) is len(loaded_key_j) when that key
    # is a suffix of current_key_i, else 0.  Taking the row-wise max picks the
    # longest (most specific) matching checkpoint key for each model key.
    match_matrix = [
        len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys
    ]
    match_matrix = torch.as_tensor(match_matrix).view(
        len(current_keys), len(loaded_keys)
    )
    max_match_size, idxs = match_matrix.max(1)
    # An all-zero row means no checkpoint key matches; mark it with -1 so the
    # copy loop below skips it.
    idxs[max_match_size == 0] = -1
    # Column widths used only to align the log output.
    max_size = max([len(key) for key in current_keys]) if current_keys else 1
    max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1
    log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
    logger = logging.getLogger(__name__)
    for idx_new, idx_old in enumerate(idxs.tolist()):
        if idx_old == -1:
            continue
        key = current_keys[idx_new]
        key_old = loaded_keys[idx_old]
        model_state_dict[key] = loaded_state_dict[key_old]
        logger.info(
            log_str_template.format(
                key,
                max_size,
                key_old,
                max_size_loaded,
                tuple(loaded_state_dict[key_old].shape),
            )
        )
def strip_prefix_if_present(state_dict, prefix):
    """
    Return a copy of ``state_dict`` with ``prefix`` removed from the start of
    every key, or ``state_dict`` unchanged if any key lacks the prefix.

    Args:
        state_dict (dict): mapping of parameter names to tensors/values.
        prefix (str): prefix to strip, e.g. ``"module."``.
    """
    keys = sorted(state_dict.keys())
    if not all(key.startswith(prefix) for key in keys):
        return state_dict
    stripped_state_dict = OrderedDict()
    for key, value in state_dict.items():
        # Slice rather than str.replace: replace() would also remove any later
        # occurrence of the prefix inside the key, e.g.
        # "module.submodule.weight" -> "subweight" instead of
        # "submodule.weight".
        stripped_state_dict[key[len(prefix):]] = value
    return stripped_state_dict
def load_state_dict(model, loaded_state_dict):
    """Load ``loaded_state_dict`` into ``model``, tolerating key-name prefixes."""
    current_state = model.state_dict()
    # Checkpoints saved from a model wrapped in DataParallel or
    # DistributedDataParallel carry a "module." prefix on every key; drop it
    # so the keys line up with the bare model before matching.
    loaded_state_dict = strip_prefix_if_present(loaded_state_dict, prefix="module.")
    align_and_update_state_dicts(current_state, loaded_state_dict)
    # Strict load: after alignment, the updated copy of the model's own state
    # dict is guaranteed to contain every expected key.
    model.load_state_dict(current_state)
| 3,823 | 39.680851 | 92 | py |
hawp | hawp-master/parsing/utils/imports.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import importlib
import importlib.util
import sys
def import_file(module_name, file_path, make_importable=False):
    """Load the Python file at ``file_path`` as a module named ``module_name``.

    When ``make_importable`` is true, the module is also registered in
    ``sys.modules`` so later ``import module_name`` statements resolve to it.
    Returns the freshly executed module object.
    """
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    if make_importable:
        sys.modules[module_name] = loaded
    return loaded
| 446 | 28.8 | 73 | py |
hawp | hawp-master/parsing/backbones/stacked_hg.py | """
Hourglass network inserted in the pre-activated Resnet
Use lr=0.01 for current version
(c) Nan Xue (HAWP)
(c) Yichao Zhou (LCNN)
(c) YANG, Wei
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ["HourglassNet", "hg"]
class Bottleneck2D(nn.Module):
    """Pre-activation bottleneck residual block: three BN -> ReLU -> Conv
    stages (1x1 reduce, 3x3, 1x1 expand) plus an identity/downsample skip."""

    # Channel multiplier: the block emits ``planes * expansion`` channels.
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck2D, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the skip branch only when a downsample module was supplied
        # (channel or resolution change); otherwise use the input as-is.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(self.bn2(out)))
        out = self.conv3(self.relu(self.bn3(out)))
        return out + identity
class Hourglass(nn.Module):
    """A single recursive hourglass module (Newell et al., ECCV 2016).

    Each depth level stores three residual chains (skip branch, pre-recursion
    branch, post-recursion branch); the innermost level stores a fourth chain
    used at the bottleneck.  The forward pass repeatedly downsamples, recurses,
    then upsamples and merges with the skip connection.
    """
    def __init__(self, block, num_blocks, planes, depth):
        # block: residual block class (e.g. Bottleneck2D); num_blocks: blocks
        # per residual chain; planes: base channel width; depth: recursion depth.
        super(Hourglass, self).__init__()
        self.depth = depth
        self.block = block
        self.hg = self._make_hour_glass(block, num_blocks, planes, depth)
    def _make_residual(self, block, num_blocks, planes):
        # Sequential chain of `num_blocks` residual blocks at constant width.
        layers = []
        for i in range(0, num_blocks):
            layers.append(block(planes * block.expansion, planes))
        return nn.Sequential(*layers)
    def _make_hour_glass(self, block, num_blocks, planes, depth):
        # self.hg[i] is a ModuleList of 3 residual chains (4 when i == 0:
        # the extra chain processes the innermost, lowest-resolution tensor).
        hg = []
        for i in range(depth):
            res = []
            for j in range(3):
                res.append(self._make_residual(block, num_blocks, planes))
            if i == 0:
                res.append(self._make_residual(block, num_blocks, planes))
            hg.append(nn.ModuleList(res))
        return nn.ModuleList(hg)
    def _hour_glass_forward(self, n, x):
        # Skip branch processed at the current resolution.
        up1 = self.hg[n - 1][0](x)
        # Downsample by 2 and process, then recurse one level deeper
        # (or run the dedicated bottleneck chain at the innermost level).
        low1 = F.max_pool2d(x, 2, stride=2)
        low1 = self.hg[n - 1][1](low1)
        if n > 1:
            low2 = self._hour_glass_forward(n - 1, low1)
        else:
            low2 = self.hg[n - 1][3](low1)
        low3 = self.hg[n - 1][2](low2)
        # Upsample back to the current resolution and merge with the skip.
        up2 = F.interpolate(low3, scale_factor=2)
        out = up1 + up2
        return out
    def forward(self, x):
        return self._hour_glass_forward(self.depth, x)
class HourglassNet(nn.Module):
    """Hourglass model from Newell et al ECCV 2016"""
    def __init__(self, inplanes, num_feats, block, head, depth, num_stacks, num_blocks, num_classes):
        # inplanes: stem width; num_feats: hourglass width; block: residual
        # block class; head: callable (channels, num_classes) -> output head
        # module; depth: hourglass recursion depth; num_stacks: number of
        # stacked hourglasses; num_blocks: residual blocks per chain;
        # num_classes: output channels of each head.
        super(HourglassNet, self).__init__()
        self.inplanes = inplanes
        self.num_feats = num_feats
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv, residual layers, and a max-pool — the
        # overall spatial reduction before the hourglasses is 4x.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, 1)
        self.layer3 = self._make_residual(block, self.num_feats, 1)
        self.maxpool = nn.MaxPool2d(2, stride=2)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        # vpts = []
        hg, res, fc, score, fc_, score_ = [], [], [], [], [], []
        for i in range(num_stacks):
            hg.append(Hourglass(block, num_blocks, self.num_feats, depth))
            res.append(self._make_residual(block, self.num_feats, num_blocks))
            fc.append(self._make_fc(ch, ch))
            score.append(head(ch, num_classes))
            # vpts.append(VptsHead(ch))
            # vpts.append(nn.Linear(ch, 9))
            # score.append(nn.Conv2d(ch, num_classes, kernel_size=1))
            # score[i].bias.data[0] += 4.6
            # score[i].bias.data[2] += 4.6
            if i < num_stacks - 1:
                # Intermediate stacks project both features and predictions
                # back into the next stack's input (intermediate supervision).
                fc_.append(nn.Conv2d(ch, ch, kernel_size=1))
                score_.append(nn.Conv2d(num_classes, ch, kernel_size=1))
        self.hg = nn.ModuleList(hg)
        self.res = nn.ModuleList(res)
        self.fc = nn.ModuleList(fc)
        self.score = nn.ModuleList(score)
        # self.vpts = nn.ModuleList(vpts)
        self.fc_ = nn.ModuleList(fc_)
        self.score_ = nn.ModuleList(score_)
    def _make_residual(self, block, planes, blocks, stride=1):
        # ResNet-style layer builder.  NOTE: mutates self.inplanes, so the
        # order of _make_residual calls in __init__ matters.  A 1x1 projection
        # is added when the channel count or stride changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def _make_fc(self, inplanes, outplanes):
        # 1x1 conv + BN + the shared ReLU, used between hourglass and head.
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1)
        return nn.Sequential(conv, bn, self.relu)
    def forward(self, x):
        # Returns (scores, y): the per-stack score maps in reverse order
        # (last stack first) and the final feature map y.
        out = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y = self.hg[i](x)
            y = self.res[i](y)
            y = self.fc[i](y)
            score = self.score[i](y)
            out.append(score)
            if i < self.num_stacks - 1:
                # Feed features and predictions back into the next stack.
                fc_ = self.fc_[i](y)
                score_ = self.score_[i](score)
                x = x + fc_ + score_
        return out[::-1], y
| 6,111 | 31.684492 | 101 | py |
hawp | hawp-master/parsing/backbones/multi_task_head.py | import torch
import torch.nn as nn
class MultitaskHead(nn.Module):
    """One small conv branch per output group; results are concatenated
    along the channel dimension."""

    def __init__(self, input_channels, num_class, head_size):
        # head_size is a nested list of per-task channel counts; flattening it
        # yields one branch per output group.
        super(MultitaskHead, self).__init__()
        hidden = int(input_channels / 4)
        branches = []
        for output_channels in sum(head_size, []):
            branches.append(
                nn.Sequential(
                    nn.Conv2d(input_channels, hidden, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(hidden, output_channels, kernel_size=1),
                )
            )
        self.heads = nn.ModuleList(branches)
        # The per-branch channel counts must add up to the requested total.
        assert num_class == sum(sum(head_size, []))

    def forward(self, x):
        per_branch = [branch(x) for branch in self.heads]
        return torch.cat(per_branch, dim=1)
class AngleDistanceHead(nn.Module):
    """Multi-branch head like MultitaskHead, except that every 2-channel
    branch ends in a CosineSineLayer instead of a plain 1x1 conv."""

    def __init__(self, input_channels, num_class, head_size):
        super(AngleDistanceHead, self).__init__()
        hidden = int(input_channels / 4)
        branches = []
        for output_channels in sum(head_size, []):
            if output_channels != 2:
                branches.append(
                    nn.Sequential(
                        nn.Conv2d(input_channels, hidden, kernel_size=3, padding=1),
                        nn.ReLU(inplace=True),
                        nn.Conv2d(hidden, output_channels, kernel_size=1),
                    )
                )
            else:
                # NOTE(review): CosineSineLayer is not defined in this file's
                # visible scope — presumably it maps features to a 2-channel
                # (cos, sin) angle output; confirm where it is defined.
                branches.append(
                    nn.Sequential(
                        nn.Conv2d(input_channels, hidden, kernel_size=3, padding=1),
                        nn.ReLU(inplace=True),
                        CosineSineLayer(hidden),
                    )
                )
        self.heads = nn.ModuleList(branches)
        # The per-branch channel counts must add up to the requested total.
        assert num_class == sum(sum(head_size, []))

    def forward(self, x):
        per_branch = [branch(x) for branch in self.heads]
        return torch.cat(per_branch, dim=1)
hawp | hawp-master/scripts/test.py | import torch
from parsing.config import cfg
from parsing.utils.comm import to_device
from parsing.dataset import build_test_dataset
from parsing.detector import get_hawp_model
from parsing.utils.logger import setup_logger
from parsing.utils.checkpoint import DetectronCheckpointer
from parsing.config.paths_catalog import DatasetCatalog
from parsing.utils.metric_evaluation import TPFP, AP
import os
import os.path as osp
import argparse
import logging
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
import numpy as np
AVAILABLE_DATASETS = ('wireframe_test', 'york_test')
parser = argparse.ArgumentParser(description='HAWP Testing')
parser.add_argument("--config-file",
metavar="FILE",
help="path to config file",
type=str,
default=None,
)
parser.add_argument("--display",
default=False,
action='store_true')
parser.add_argument('-t','--threshold', dest='threshold', type=float, default=10.0, help="the threshold for sAP evaluation")
parser.add_argument("opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER
)
args = parser.parse_args()
def test(cfg):
    """Run HAWP inference on every configured test dataset, dump the raw
    predictions to ``<OUTPUT_DIR>/<dataset>.json`` and, for datasets with
    registered ground truth (AVAILABLE_DATASETS), compute and plot the
    structural AP (sAP) at the command-line ``--threshold``.
    """
    logger = logging.getLogger("hawp.testing")
    device = cfg.MODEL.DEVICE
    # Without a config file, fall back to the released pretrained weights.
    model = get_hawp_model(pretrained=args.config_file is None)
    model = model.to(device)
    test_datasets = build_test_dataset(cfg)
    output_dir = cfg.OUTPUT_DIR
    if args.config_file is not None:
        # Restore the latest checkpoint trained with this configuration.
        checkpointer = DetectronCheckpointer(cfg,
                                             model,
                                             save_dir=cfg.OUTPUT_DIR,
                                             save_to_disk=True,
                                             logger=logger)
        _ = checkpointer.load()
    model = model.eval()
    for name, dataset in test_datasets:
        results = []
        logger.info('Testing on {} dataset'.format(name))
        for i, (images, annotations) in enumerate(tqdm(dataset)):
            with torch.no_grad():
                output, extra_info = model(images.to(device), annotations)
                output = to_device(output,'cpu')
            if args.display:
                # Optional visual check: overlay high-confidence lines.
                im = dataset.dataset.image(i)
                plt.imshow(im)
                lines = output['lines_pred'].numpy()
                scores = output['lines_score'].numpy()
                plt.plot([lines[scores>0.97,0],lines[scores>0.97,2]],
                         [lines[scores>0.97,1],lines[scores>0.97,3]], 'r-')
                plt.show()
            # Tensors are not JSON-serializable; convert them to lists.
            for k in output.keys():
                if isinstance(output[k], torch.Tensor):
                    output[k] = output[k].tolist()
            results.append(output)
        outpath_dataset = osp.join(output_dir,'{}.json'.format(name))
        logger.info('Writing the results of the {} dataset into {}'.format(name,
                    outpath_dataset))
        with open(outpath_dataset,'w') as _out:
            json.dump(results,_out)
        if name not in AVAILABLE_DATASETS:
            # No ground-truth annotations registered for this dataset.
            continue
        logger.info('evaluating the results on the {} dataset'.format(name))
        ann_file = DatasetCatalog.get(name)['args']['ann_file']
        with open(ann_file,'r') as _ann:
            annotations_list = json.load(_ann)
        annotations_dict = {
            ann['filename']: ann for ann in annotations_list
        }
        with open(outpath_dataset,'r') as _res:
            result_list = json.load(_res)
        tp_list, fp_list, scores_list = [],[],[]
        n_gt = 0
        for res in result_list:
            filename = res['filename']
            gt = annotations_dict[filename]
            lines_pred = np.array(res['lines_pred'],dtype=np.float32)
            scores = np.array(res['lines_score'],dtype=np.float32)
            # Rank this image's predictions by descending confidence.
            sort_idx = np.argsort(-scores)
            lines_pred = lines_pred[sort_idx]
            scores = scores[sort_idx]
            # import pdb; pdb.set_trace()
            # Rescale endpoints to the canonical 128x128 evaluation frame.
            lines_pred[:,0] *= 128/float(res['width'])
            lines_pred[:,1] *= 128/float(res['height'])
            lines_pred[:,2] *= 128/float(res['width'])
            lines_pred[:,3] *= 128/float(res['height'])
            lines_gt = np.array(gt['lines'],dtype=np.float32)
            lines_gt[:,0] *= 128/float(gt['width'])
            lines_gt[:,1] *= 128/float(gt['height'])
            lines_gt[:,2] *= 128/float(gt['width'])
            lines_gt[:,3] *= 128/float(gt['height'])
            tp, fp = TPFP(lines_pred,lines_gt,args.threshold)
            n_gt += lines_gt.shape[0]
            tp_list.append(tp)
            fp_list.append(fp)
            scores_list.append(scores)
        # Accumulate matches over the whole dataset, then sort globally by
        # confidence to build the cumulative precision/recall curves.
        tp_list = np.concatenate(tp_list)
        fp_list = np.concatenate(fp_list)
        scores_list = np.concatenate(scores_list)
        idx = np.argsort(scores_list)[::-1]
        tp = np.cumsum(tp_list[idx])/n_gt
        fp = np.cumsum(fp_list[idx])/n_gt
        rcs = tp
        pcs = tp/np.maximum(tp+fp,1e-9)
        sAP = AP(tp,fp)*100
        sAP_string = 'sAP{} = {:.1f}'.format(args.threshold,sAP)
        logger.info(sAP_string)
        try:
            # Best-effort plot of the PR curve with iso-F-score guide lines.
            f_scores = np.linspace(0.2,0.9,num=8)
            for f_score in f_scores:
                x = np.linspace(0.01,1)
                y = f_score*x/(2*x-f_score)
                l, = plt.plot(x[y >= 0], y[y >= 0], color=[0,0.5,0], alpha=0.3)
                plt.annotate("f={0:0.1}".format(f_score), xy=(0.9, y[45] + 0.02), alpha=0.4,fontsize=10)
            plt.rc('legend',fontsize=10)
            plt.grid(True)
            plt.axis([0.0, 1.0, 0.0, 1.0])
            plt.xticks(np.arange(0, 1.0, step=0.1))
            plt.xlabel("Recall")
            plt.ylabel("Precision")
            plt.yticks(np.arange(0, 1.0, step=0.1))
            plt.plot(rcs,pcs,'r-')
            plt.title(sAP_string)
            plt.show()
        except:
            # NOTE(review): bare except silently swallows any plotting error
            # (e.g. on headless backends); plotting is best-effort by design,
            # but catching Exception and logging would be safer.
            pass
if __name__ == "__main__":
    # Merge config from the given file (training setup), or fall back to the
    # default pretrained-model configuration; then apply CLI overrides.
    if args.config_file is not None:
        cfg.merge_from_file(args.config_file)
    else:
        cfg.OUTPUT_DIR = 'outputs/default'
        os.makedirs(cfg.OUTPUT_DIR,exist_ok=True)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    logger = setup_logger('hawp', output_dir)
    logger.info(args)
    if args.config_file is not None:
        logger.info("Loaded configuration file {}".format(args.config_file))
    else:
        logger.info("Loaded the default configuration for testing")
    test(cfg)
| 6,703 | 35.63388 | 124 | py |
hawp | hawp-master/scripts/predict.py | import torch
from parsing.config import cfg
from parsing.utils.comm import to_device
from parsing.dataset.build import build_transform
from parsing.detector import get_hawp_model
from parsing.utils.logger import setup_logger
from skimage import io
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='HAWP Testing')
parser.add_argument("--img",type=str,required=True,
help="image path")
parser.add_argument("--threshold",
type=float,
default=0.97)
args = parser.parse_args()
def test(cfg, impath):
    """Run the pretrained HAWP model on the single image at ``impath`` and
    display the detected line segments whose score exceeds ``--threshold``."""
    device = cfg.MODEL.DEVICE
    model = get_hawp_model(pretrained=True)
    model = model.to(device)
    transform = build_transform(cfg)
    image = io.imread(impath)
    # [None] adds a batch dimension; the transform expects float input.
    image_tensor = transform(image.astype(float))[None].to(device)
    meta = {
        'filename': impath,
        'height': image.shape[0],
        'width': image.shape[1],
    }
    with torch.no_grad():
        output, _ = model(image_tensor,[meta])
        output = to_device(output,'cpu')
    lines = output['lines_pred'].numpy()
    scores = output['lines_score'].numpy()
    # Keep only detections above the user-supplied confidence threshold.
    idx = scores>args.threshold
    plt.figure(figsize=(6,6))
    plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
                        hspace = 0, wspace = 0)
    plt.imshow(image)
    # Draw segments in blue and their endpoints in cyan.
    plt.plot([lines[idx,0],lines[idx,2]],
             [lines[idx,1],lines[idx,3]], 'b-')
    plt.plot(lines[idx,0],lines[idx,1],'c.')
    plt.plot(lines[idx,2],lines[idx,3],'c.')
    plt.axis('off')
    plt.show()
if __name__ == "__main__":
    # Single-image demo uses the default (unmodified) configuration.
    cfg.freeze()
    test(cfg,args.img)
| 1,725 | 27.295082 | 68 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.