# NOTE: dataset/table export artifact ("code stringlengths ... |---|") converted
# to a comment; this file is a concatenated dump of DeepLab backbone code.
def drn_d_40(BatchNorm, pretrained=True):
    """Build a DRN-D-40 backbone, optionally seeded with ImageNet weights.

    The classifier head ('fc') of the published checkpoint is dropped because
    the backbone is used for dense prediction, not classification.
    """
    model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        state = model_zoo.load_url(model_urls['drn-d-40'])
        # The fully-connected head is not part of the backbone.
        for head_key in ('fc.weight', 'fc.bias'):
            del state[head_key]
        model.load_state_dict(state)
    return model
|
def drn_d_54(BatchNorm, pretrained=True):
    """Build a DRN-D-54 backbone (Bottleneck blocks), optionally pretrained.

    The 'fc' head of the checkpoint is removed before loading, since only the
    feature extractor is used.
    """
    model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        checkpoint = model_zoo.load_url(model_urls['drn-d-54'])
        # Strip the classification head.
        del checkpoint['fc.weight']
        del checkpoint['fc.bias']
        model.load_state_dict(checkpoint)
    return model
|
def drn_d_105(BatchNorm, pretrained=True):
    """Build a DRN-D-105 backbone, optionally seeded with ImageNet weights.

    As with the other DRN factories, the checkpoint's 'fc' head is discarded.
    """
    model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
    if not pretrained:
        return model
    weights = model_zoo.load_url(model_urls['drn-d-105'])
    # Drop the classification head before loading.
    del weights['fc.weight']
    del weights['fc.bias']
    model.load_state_dict(weights)
    return model
|
def conv_bn(inp, oup, stride, BatchNorm):
    """3x3 conv (padding 1, no bias) -> BatchNorm -> ReLU6, as one Sequential."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        BatchNorm(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
|
def fixed_padding(inputs, kernel_size, dilation):
    """Pad symmetrically so a stride-1 (dilated) conv preserves spatial size."""
    effective = kernel_size + (kernel_size - 1) * (dilation - 1)
    total = effective - 1
    begin = total // 2
    end = total - begin
    return F.pad(inputs, (begin, end, begin, end))
|
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block with 'same' padding applied manually.

    fixed_padding() is applied before the depthwise conv (which is therefore
    built with padding=0) so dilation is handled uniformly.
    """

    def __init__(self, inp, oup, stride, dilation, expand_ratio, BatchNorm):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = round(inp * expand_ratio)
        # Residual shortcut only when the block keeps resolution and width.
        self.use_res_connect = stride == 1 and inp == oup
        self.kernel_size = 3
        self.dilation = dilation
        if expand_ratio == 1:
            # No expansion: depthwise 3x3 followed by linear 1x1 projection.
            layers = [
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation,
                          groups=hidden_dim, bias=False),
                BatchNorm(hidden_dim),
                nn.ReLU6(inplace=True),
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, 1, 1, bias=False),
                BatchNorm(oup),
            ]
        else:
            # Expand -> depthwise -> linear projection.
            layers = [
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, 1, bias=False),
                BatchNorm(hidden_dim),
                nn.ReLU6(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation,
                          groups=hidden_dim, bias=False),
                BatchNorm(hidden_dim),
                nn.ReLU6(inplace=True),
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, 1, bias=False),
                BatchNorm(oup),
            ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        padded = fixed_padding(x, self.kernel_size, dilation=self.dilation)
        out = self.conv(padded)
        return (x + out) if self.use_res_connect else out
|
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone for DeepLab-style heads.

    Builds the standard inverted-residual stack; once the accumulated stride
    reaches `output_stride`, later stages switch from striding to dilation.
    forward() returns (high-level features, low-level features from the first
    four stages).
    """
    def __init__(self, output_stride=8, BatchNorm=None, width_mult=1.0, pretrained=True):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        current_stride = 1
        rate = 1
        # Each entry: [t expansion factor, c channels, n repeats, s stride].
        interverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        input_channel = int((input_channel * width_mult))
        self.features = [conv_bn(3, input_channel, 2, BatchNorm)]
        current_stride *= 2
        for (t, c, n, s) in interverted_residual_setting:
            if (current_stride == output_stride):
                # Target stride reached: keep resolution, dilate instead.
                stride = 1
                dilation = rate
                rate *= s
            else:
                stride = s
                dilation = 1
                current_stride *= s
            output_channel = int((c * width_mult))
            for i in range(n):
                # Only the first block of a stage applies the stage stride.
                if (i == 0):
                    self.features.append(block(input_channel, output_channel, stride, dilation, t, BatchNorm))
                else:
                    self.features.append(block(input_channel, output_channel, 1, dilation, t, BatchNorm))
                input_channel = output_channel
        self.features = nn.Sequential(*self.features)
        self._initialize_weights()
        if pretrained:
            self._load_pretrained_model()
        # Split point: features[0:4] feed the decoder's low-level skip path.
        self.low_level_features = self.features[0:4]
        self.high_level_features = self.features[4:]
    def forward(self, x):
        low_level_feat = self.low_level_features(x)
        x = self.high_level_features(low_level_feat)
        return (x, low_level_feat)
    def _load_pretrained_model(self):
        # Copy matching ImageNet MobileNetV2 weights; keys not present in this
        # model are ignored, keys not in the checkpoint keep their init values.
        pretrain_dict = model_zoo.load_url('http://jeff95.me/models/mobilenet_v2-6a65762b.pth')
        model_dict = {}
        state_dict = self.state_dict()
        for (k, v) in pretrain_dict.items():
            if (k in state_dict):
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
    def _initialize_weights(self):
        # Kaiming-init convs; set norm layers to the identity transform.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
|
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1, channel expansion 4).

    NOTE(review): no explicit activation follows bn1/bn2; in this codebase the
    BatchNorm argument is typically ABN (activated batch norm), which fuses
    the nonlinearity into the norm layer -- confirm before passing a plain
    BatchNorm2d.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               dilation=dilation, padding=dilation, bias=False)
        self.bn2 = BatchNorm(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes * 4)
        # Fix: forward() uses self.relu after the residual add, but it was
        # never defined, causing an AttributeError on the first call.
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Project the shortcut when shape/width changes.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
|
class ResNet(nn.Module):
    """Dilated ResNet backbone for DeepLab.

    The stride/dilation schedule is chosen from `output_stride` (16 or 8);
    layer4 is built as a multi-grid unit. forward() returns the final feature
    map plus the layer1 output as low-level features.

    NOTE(review): forward() applies no explicit ReLU after bn1; in this file
    BatchNorm is typically ABN (activation fused into the norm) -- confirm
    before using with a plain BatchNorm2d.
    """
    def __init__(self, block, layers, output_stride, BatchNorm, pretrained=True):
        # `block` is the residual block class (e.g. Bottleneck); `layers`
        # gives the block count for each of the four stages.
        self.inplanes = 64
        super(ResNet, self).__init__()
        blocks = [1, 2, 4]  # multi-grid dilation multipliers for layer4
        # Dilated convs replace further striding once the target stride is hit.
        if (output_stride == 16):
            strides = [1, 2, 2, 1]
            dilations = [1, 1, 1, 2]
        elif (output_stride == 8):
            strides = [1, 2, 1, 1]
            dilations = [1, 1, 2, 4]
        else:
            raise NotImplementedError
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = BatchNorm(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm)
        # layer4 uses multi-grid dilations instead of a plain stack.
        self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
        self._init_weight()
        if pretrained:
            self._load_pretrained_model()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        # Stack `blocks` residual blocks; 1x1-project the shortcut when the
        # stage changes resolution or width.
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))
        return nn.Sequential(*layers)
    def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        # Multi-grid unit: one block per entry of `blocks`, dilation scaled by
        # the corresponding multiplier (DeepLab v3 layer4 recipe).
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation=(blocks[0] * dilation), downsample=downsample, BatchNorm=BatchNorm))
        self.inplanes = (planes * block.expansion)
        for i in range(1, len(blocks)):
            layers.append(block(self.inplanes, planes, stride=1, dilation=(blocks[i] * dilation), BatchNorm=BatchNorm))
        return nn.Sequential(*layers)
    def forward(self, input):
        x = self.conv1(input)
        x = self.bn1(x)
        # NOTE(review): no explicit ReLU here; assumed fused into ABN.
        x = self.maxpool(x)
        x = self.layer1(x)
        low_level_feat = x  # stage-1 features feed the decoder skip path
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return (x, low_level_feat)
    def _init_weight(self):
        # He-initialize convs; set norm layers to the identity transform.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _load_pretrained_model(self):
        # Copy matching ImageNet ResNet-101 weights into this model; keys
        # absent here (e.g. the fc head) are silently skipped.
        pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')
        model_dict = {}
        state_dict = self.state_dict()
        for (k, v) in pretrain_dict.items():
            if (k in state_dict):
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
|
def ResNet101(output_stride, BatchNorm, pretrained=True):
    """Constructs a ResNet-101 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm,
                  pretrained=pretrained)
|
def fixed_padding(inputs, kernel_size, dilation):
    """Symmetric 'same'-style padding for a (possibly dilated) kernel.

    NOTE(review): this redefines the fixed_padding declared earlier in the
    file with identical behavior -- likely a concatenation artifact.
    """
    span = (kernel_size - 1) * dilation + 1  # effective receptive size
    pad_total = span - 1
    left = pad_total // 2
    right = pad_total - left
    return F.pad(inputs, (left, right, left, right))
|
class SeparableConv2d(nn.Module):
    """Depthwise conv (manually padded in forward) + BN + 1x1 pointwise conv."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1,
                 bias=False, BatchNorm=None):
        super(SeparableConv2d, self).__init__()
        # Depthwise: one filter per input channel; padding is applied by hand
        # in forward(), so the conv itself is created with padding=0.
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0,
                               dilation, groups=inplanes, bias=bias)
        self.bn = BatchNorm(inplanes)
        # Pointwise 1x1 mixes channels.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        out = fixed_padding(x, self.conv1.kernel_size[0],
                            dilation=self.conv1.dilation[0])
        out = self.conv1(out)
        out = self.bn(out)
        return self.pointwise(out)
|
class Block(nn.Module):
    """Xception block: a stack of separable convs plus a (possibly projected) skip.

    The skip path is a 1x1 projection whenever the block changes width or
    stride, or when skip='conv' is requested explicitly; otherwise it is the
    identity.
    """

    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, BatchNorm=None,
                 start_with_relu=True, grow_first=True, is_last=False, skip=None):
        super(Block, self).__init__()
        # Fix: the original branch structure left self.skip unassigned when the
        # block neither changed shape nor asked for skip='conv', so forward()
        # raised AttributeError for identity blocks (exactly the middle-flow
        # blocks that AlignedXception builds). Default it to None explicitly.
        if (planes != inplanes) or (stride != 1) or (skip == 'conv'):
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
            self.skipbn = BatchNorm(planes)
        else:
            self.skip = None
        self.relu = nn.ReLU()
        rep = []
        filters = inplanes
        if grow_first:
            # Widen to `planes` with the first separable conv.
            rep.append(self.relu)
            rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))
            filters = planes
        for i in range(reps - 1):
            rep.append(self.relu)
            rep.append(SeparableConv2d(filters, filters, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(filters))
        if not grow_first:
            # Widen at the end instead.
            rep.append(self.relu)
            rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))
        if stride != 1:
            # Downsample with a strided separable conv.
            rep.append(self.relu)
            rep.append(SeparableConv2d(planes, planes, 3, 2, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))
        if stride == 1 and is_last:
            # Extra stride-1 separable conv for "last" blocks.
            rep.append(self.relu)
            rep.append(SeparableConv2d(planes, planes, 3, 1, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))
        if not start_with_relu:
            rep = rep[1:]
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        x = self.rep(inp)
        if self.skip is not None:
            residual = self.skipbn(self.skip(inp))
        else:
            residual = inp
        return x + residual
|
class AlignedXception(nn.Module):
    """Modified Aligned Xception backbone (xception_65 / xception_71 variants).

    forward() returns (high-level features, 256-channel low-level features)
    for DeepLab-style heads.
    """

    def __init__(self, output_stride, BatchNorm, pretrained=False, mode='xception_71'):
        super(AlignedXception, self).__init__()
        # Stride/dilation schedule for the requested output stride.
        if output_stride == 16:
            entry_block3_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError
        # Entry flow stem.
        self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
        self.bn1 = BatchNorm(32)
        self.relu = nn.ReLU()
        self.block0_0 = nn.Sequential(self.conv1, self.bn1, self.relu)
        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = BatchNorm(64)
        self.block0_1 = nn.Sequential(self.conv2, self.bn2, self.relu)
        self.block1 = Block(64, 128, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False)
        # xception_71 splits blocks 2 and 3 into two sub-blocks each.
        if mode == 'xception_71':
            self.block2_0 = Block(128, 256, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False, grow_first=True)
            self.block2_1 = Block(256, 256, reps=2, stride=1, BatchNorm=BatchNorm, start_with_relu=False, is_last=True, grow_first=True, skip='conv')
            self.block2 = nn.Sequential(self.block2_0, self.block2_1)
        elif mode == 'xception_65':
            self.block2 = Block(128, 256, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False, grow_first=True)
        if mode == 'xception_71':
            self.block3_0 = Block(256, 728, reps=3, stride=entry_block3_stride, BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
            self.block3_1 = Block(728, 728, reps=2, stride=1, BatchNorm=BatchNorm, start_with_relu=True, grow_first=True, is_last=True, skip='conv')
            self.block3 = nn.Sequential(self.block3_0, self.block3_1)
        if mode == 'xception_65':
            self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, BatchNorm=BatchNorm, start_with_relu=True, grow_first=True, is_last=True)
        self.entry_flow = nn.Sequential(self.conv1, self.bn1, self.relu, self.conv2, self.bn2, self.block1, self.block2, self.block3)
        # Middle flow: 16 identical 728-channel blocks (block4 .. block19).
        for idx in range(4, 20):
            setattr(self, 'block{}'.format(idx),
                    Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                          BatchNorm=BatchNorm, start_with_relu=True, grow_first=True))
        # Exit flow.
        self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0], BatchNorm=BatchNorm, start_with_relu=True, grow_first=False, is_last=True)
        self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn3 = BatchNorm(1536)
        self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn4 = BatchNorm(1536)
        self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn5 = BatchNorm(2048)
        self.exit_flow = nn.Sequential(self.block20, self.conv3, self.bn3, self.conv4, self.bn4, self.conv5, self.bn5)
        self._init_weight()
        if pretrained:
            self._load_pretrained_model()

    def forward(self, x):
        # Entry flow.
        x = self.block0_0(x)
        x = self.block0_1(x)
        x = self.block1(x)
        x = self.relu(x)
        x = self.block2(x)
        low_level_feat = x  # 256-channel features for the decoder skip path
        x = self.block3(x)
        # Middle flow.
        for idx in range(4, 20):
            x = getattr(self, 'block{}'.format(idx))(x)
        # Exit flow.
        x = self.block20(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)
        return (x, low_level_feat)

    def _init_weight(self):
        """He-initialize convs; set norm layers to the identity transform."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _load_pretrained_model(self):
        """Load ImageNet Xception weights, remapping them onto this layout."""
        pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
        model_dict = {}
        state_dict = self.state_dict()
        for k, v in pretrain_dict.items():
            # Fix: was `if k in model_dict`, which is always False because
            # model_dict starts empty -- every pretrained weight was silently
            # skipped. Filter against this model's own state_dict instead.
            if k in state_dict:
                if 'pointwise' in k:
                    # Checkpoint stores pointwise weights as 2-D; ours are 4-D.
                    v = v.unsqueeze(-1).unsqueeze(-1)
                if k.startswith('block11'):
                    # Checkpoint has fewer middle blocks; replicate block11's
                    # weights across our extra middle blocks 12..19.
                    model_dict[k] = v
                    for idx in range(12, 20):
                        model_dict[k.replace('block11', 'block{}'.format(idx))] = v
                elif k.startswith('block12'):
                    model_dict[k.replace('block12', 'block20')] = v
                elif k.startswith('bn3'):
                    model_dict[k] = v
                    model_dict[k.replace('bn3', 'bn4')] = v
                elif k.startswith('conv4'):
                    model_dict[k.replace('conv4', 'conv5')] = v
                elif k.startswith('bn4'):
                    model_dict[k.replace('bn4', 'bn5')] = v
                else:
                    model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
|
def SeparateConv(C_in, C_out, kernel_size, stride=1, padding=0, dilation=1, bias=False, BatchNorm=ABN):
    """Depthwise-separable conv: depthwise kxk + BN + pointwise 1x1 + BN.

    Fix: the `stride` and `bias` arguments were previously accepted but
    ignored (the depthwise conv hard-coded stride=1 and both convs hard-coded
    bias=False); they are now honored. All call sites in this file pass
    stride=1 and bias=False, so their behavior is unchanged.
    """
    return nn.Sequential(
        nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, groups=C_in, bias=bias),
        BatchNorm(C_in),
        nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=bias),
        BatchNorm(C_out),
    )
|
class Decoder(nn.Module):
    """DeepLabV3+ decoder: projects low-level features, fuses them with the
    ASPP output, and refines with two 3x3 convs before the final classifier."""

    def __init__(self, num_classes, backbone, BatchNorm, args, separate):
        super(Decoder, self).__init__()
        # Channel count of the backbone's low-level feature map.
        if (backbone == 'resnet') or (backbone == 'drn'):
            low_level_inplanes = 256
        elif backbone == 'xception':
            low_level_inplanes = 256
        elif backbone == 'mobilenet':
            low_level_inplanes = 24
        elif backbone == 'autodeeplab':
            low_level_inplanes = args.filter_multiplier * args.steps
        else:
            raise NotImplementedError
        # 1x1 projection of low-level features down to 48 channels.
        self.conv_feature = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = BatchNorm(48)
        self.feature_projection = nn.Sequential(self.conv_feature, self.bn1)
        concate_channel = 48 + 256  # projected low-level + ASPP output
        if separate:
            self.conv1 = nn.Sequential(SeparateConv(concate_channel, 256, kernel_size=3, stride=1, padding=1, bias=False, BatchNorm=BatchNorm), nn.Dropout(0.5))
            self.conv2 = nn.Sequential(SeparateConv(256, 256, kernel_size=3, stride=1, padding=1, bias=False, BatchNorm=BatchNorm), nn.Dropout(0.1))
        else:
            self.conv1 = nn.Sequential(nn.Conv2d(concate_channel, 256, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm(256))
            self.conv2 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm(256))
        self.last_linear = nn.Conv2d(256, num_classes, kernel_size=1, stride=1)
        self._init_weight()

    def forward(self, x, low_level_feat):
        """Fuse ASPP output `x` with `low_level_feat`; returns per-class
        logits at the low-level feature resolution."""
        low_level_feat = self.feature_projection(low_level_feat)
        x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x, low_level_feat), dim=1)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.last_linear(x)
        return x

    def _init_weight(self):
        # Fix: this was named init_weight, but __init__ calls
        # self._init_weight(), which raised AttributeError on construction.
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if ly.bias is not None:
                    nn.init.constant_(ly.bias, 0)
|
def build_decoder(num_classes, backbone, BatchNorm, args, separate):
    """Factory wrapper returning a Decoder configured for `backbone`."""
    return Decoder(num_classes=num_classes, backbone=backbone,
                   BatchNorm=BatchNorm, args=args, separate=separate)
|
class DeepLab(nn.Module):
    """End-to-end DeepLab segmentation network: backbone -> ASPP -> decoder,
    followed by bilinear upsampling back to the input resolution."""

    def __init__(self, backbone='resnet', output_stride=16, num_classes=19,
                 use_ABN=True, freeze_bn=False, args=None, separate=False):
        super(DeepLab, self).__init__()
        if backbone == 'drn':
            # DRN backbones are built for output stride 8.
            output_stride = 8
        BatchNorm = ABN if use_ABN else NaiveBN
        self.backbone = build_backbone(backbone, output_stride, BatchNorm, args)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm, args, separate)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm, args, separate)
        if freeze_bn:
            self.freeze_bn()

    def forward(self, input_feature):
        features, low_level_feat = self.backbone(input_feature)
        features = self.aspp(features)
        logits = self.decoder(features, low_level_feat)
        return F.interpolate(logits, size=input_feature.shape[2:],
                             mode='bilinear', align_corners=True)

    def freeze_bn(self):
        """Put every batch-norm layer into eval mode (frozen running stats)."""
        for m in self.modules():
            if isinstance(m, (ABN, nn.BatchNorm2d)):
                m.eval()

    def _lr_params(self, modules):
        # Yield trainable conv/norm parameters from the given submodules.
        for module in modules:
            for _, child in module.named_modules():
                if isinstance(child, (nn.Conv2d, SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                    for p in child.parameters():
                        if p.requires_grad:
                            yield p

    def get_1x_lr_params(self):
        """Backbone parameters (trained with the base learning rate)."""
        return self._lr_params([self.backbone])

    def get_10x_lr_params(self):
        """ASPP + decoder parameters (trained with 10x the learning rate)."""
        return self._lr_params([self.aspp, self.decoder])
|
class ABN(nn.Module):
    """Activated Batch Normalization.

    Bundles a BatchNorm2d-equivalent normalization and an activation function
    into a single module.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, activation='leaky_relu', slope=0.01):
        """Create an Activated Batch Normalization module.

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If True, apply a learned scale and shift after normalization.
        activation : str
            Name of the activation function: 'leaky_relu', 'elu' or 'none'.
        slope : float
            Negative slope for the 'leaky_relu' activation.
        """
        super(ABN, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.momentum = momentum
        self.activation = activation
        self.slope = slope
        if self.affine:
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Reset running statistics and (if affine) the scale/shift."""
        nn.init.constant_(self.running_mean, 0)
        nn.init.constant_(self.running_var, 1)
        if self.affine:
            nn.init.constant_(self.weight, 1)
            nn.init.constant_(self.bias, 0)

    def forward(self, x):
        normalized = functional.batch_norm(x, self.running_mean, self.running_var,
                                           self.weight, self.bias, self.training,
                                           self.momentum, self.eps)
        if self.activation == ACT_RELU:
            return functional.relu(normalized, inplace=True)
        if self.activation == ACT_LEAKY_RELU:
            return functional.leaky_relu(normalized, negative_slope=self.slope, inplace=True)
        if self.activation == ACT_ELU:
            return functional.elu(normalized, inplace=True)
        return normalized

    def __repr__(self):
        rep = '{name}({num_features}, eps={eps}, momentum={momentum}, affine={affine}, activation={activation}'
        rep += ', slope={slope})' if self.activation == 'leaky_relu' else ')'
        return rep.format(name=self.__class__.__name__, **self.__dict__)
|
class InPlaceABN(ABN):
    """InPlace Activated Batch Normalization.

    NOTE(review): this name is re-bound later in the file by an
    autograd.Function of the same name; the later definition wins at import.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, activation='leaky_relu', slope=0.01):
        """Create an InPlace Activated Batch Normalization module.

        Parameters mirror ABN; see that class for their meaning.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine,
                                         activation, slope)

    def forward(self, x):
        # Delegate to the fused in-place autograd function.
        return inplace_abn(x, self.weight, self.bias, self.running_mean,
                           self.running_var, self.training, self.momentum,
                           self.eps, self.activation, self.slope)
|
class InPlaceABNSync(ABN):
    """InPlace Activated Batch Normalization with cross-GPU synchronization.

    Assumes replication across GPUs via the same mechanism as
    `nn.DistributedDataParallel`.
    """

    def forward(self, x):
        return inplace_abn_sync(x, self.weight, self.bias, self.running_mean,
                                self.running_var, self.training, self.momentum,
                                self.eps, self.activation, self.slope)

    def __repr__(self):
        tail = ', slope={slope})' if self.activation == 'leaky_relu' else ')'
        fmt = ('{name}({num_features}, eps={eps}, momentum={momentum}, '
               'affine={affine}, activation={activation}' + tail)
        return fmt.format(name=self.__class__.__name__, **self.__dict__)
|
def _check(fn, *args, **kwargs):
success = fn(*args, **kwargs)
if (not success):
raise RuntimeError('CUDA Error encountered in {}'.format(fn))
|
def _broadcast_shape(x):
out_size = []
for (i, s) in enumerate(x.size()):
if (i != 1):
out_size.append(1)
else:
out_size.append(s)
return out_size
|
def _reduce(x):
if (len(x.size()) == 2):
return x.sum(dim=0)
else:
(n, c) = x.size()[0:2]
return x.contiguous().view((n, c, (- 1))).sum(2).sum(0)
|
def _count_samples(x):
count = 1
for (i, s) in enumerate(x.size()):
if (i != 1):
count *= s
return count
|
def _act_forward(ctx, x):
    """Apply ctx.activation to x in place via the CUDA backend (no-op for 'none')."""
    if ctx.activation == ACT_LEAKY_RELU:
        _backend.leaky_relu_forward(x, ctx.slope)
    elif ctx.activation == ACT_ELU:
        _backend.elu_forward(x)
    # ACT_NONE: identity, nothing to do.
|
def _act_backward(ctx, x, dx):
    """Propagate gradients through ctx.activation in place (no-op for 'none')."""
    if ctx.activation == ACT_LEAKY_RELU:
        _backend.leaky_relu_backward(x, dx, ctx.slope)
    elif ctx.activation == ACT_ELU:
        _backend.elu_backward(x, dx)
    # ACT_NONE: identity, nothing to do.
|
class InPlaceABN(autograd.Function):
    """Autograd function implementing fused, in-place batch norm + activation.

    NOTE(review): shares its name with the nn.Module wrapper defined earlier
    in this file; whichever definition executes later wins at import time.
    """
    @staticmethod
    def forward(ctx, x, weight, bias, running_mean, running_var, training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01):
        # Stash hyper-parameters on the context for backward().
        ctx.training = training
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.activation = activation
        ctx.slope = slope
        ctx.affine = ((weight is not None) and (bias is not None))
        count = _count_samples(x)
        x = x.contiguous()
        # Non-affine mode passes empty tensors to the backend.
        weight = (weight.contiguous() if ctx.affine else x.new_empty(0))
        bias = (bias.contiguous() if ctx.affine else x.new_empty(0))
        if ctx.training:
            (mean, var) = _backend.mean_var(x)
            # Update running stats; the variance is Bessel-corrected.
            running_mean.mul_((1 - ctx.momentum)).add_((ctx.momentum * mean))
            running_var.mul_((1 - ctx.momentum)).add_((((ctx.momentum * var) * count) / (count - 1)))
            ctx.mark_dirty(x, running_mean, running_var)
        else:
            (mean, var) = (running_mean.contiguous(), running_var.contiguous())
            ctx.mark_dirty(x)
        # Normalize and activate x in place via the compiled backend.
        _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps)
        _act_forward(ctx, x)
        ctx.var = var
        # The *activated output* is saved (not the input); backward inverts it.
        ctx.save_for_backward(x, var, weight, bias)
        return x
    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        (z, var, weight, bias) = ctx.saved_tensors
        dz = dz.contiguous()
        # Undo the activation's effect on the gradient, in place.
        _act_backward(ctx, z, dz)
        if ctx.training:
            (edz, eydz) = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps)
        else:
            edz = dz.new_zeros(dz.size(1))
            eydz = dz.new_zeros(dz.size(1))
        dx = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps)
        # sign(weight): presumably the backend works with |weight| internally
        # (inplace-abn's invertible formulation) -- TODO confirm against the
        # matching upstream backend version.
        dweight = ((eydz * weight.sign()) if ctx.affine else None)
        dbias = (edz if ctx.affine else None)
        return (dx, dweight, dbias, None, None, None, None, None, None, None)
|
class InPlaceABNSync(autograd.Function):
    """Distributed variant of the fused in-place BN + activation.

    Batch statistics are all-reduced across processes so every replica
    normalizes with the global batch mean/variance.
    """
    @classmethod
    def forward(cls, ctx, x, weight, bias, running_mean, running_var, training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01, equal_batches=True):
        ctx.training = training
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.activation = activation
        ctx.slope = slope
        ctx.affine = ((weight is not None) and (bias is not None))
        # World size is 1 when torch.distributed is not initialized.
        ctx.world_size = (dist.get_world_size() if dist.is_initialized() else 1)
        batch_size = x.new_tensor([x.shape[0]], dtype=torch.long)
        x = x.contiguous()
        weight = (weight.contiguous() if ctx.affine else x.new_empty(0))
        bias = (bias.contiguous() if ctx.affine else x.new_empty(0))
        if ctx.training:
            (mean, var) = _backend.mean_var(x)
            if (ctx.world_size > 1):
                # Total batch size across ranks (cheap multiply when equal).
                if equal_batches:
                    batch_size *= ctx.world_size
                else:
                    dist.all_reduce(batch_size, dist.ReduceOp.SUM)
                # This rank's share of the global batch.
                ctx.factor = (x.shape[0] / float(batch_size.item()))
                # Weighted all-reduce of means; variances are combined via the
                # law of total variance.
                mean_all = (mean.clone() * ctx.factor)
                dist.all_reduce(mean_all, dist.ReduceOp.SUM)
                var_all = ((var + ((mean - mean_all) ** 2)) * ctx.factor)
                dist.all_reduce(var_all, dist.ReduceOp.SUM)
                mean = mean_all
                var = var_all
            running_mean.mul_((1 - ctx.momentum)).add_((ctx.momentum * mean))
            # Bessel correction uses the global per-channel element count.
            count = (batch_size.item() * x.view(x.shape[0], x.shape[1], (- 1)).shape[(- 1)])
            running_var.mul_((1 - ctx.momentum)).add_(((ctx.momentum * var) * (float(count) / (count - 1))))
            ctx.mark_dirty(x, running_mean, running_var)
        else:
            (mean, var) = (running_mean.contiguous(), running_var.contiguous())
            ctx.mark_dirty(x)
        # Normalize + activate in place; save activated output for backward.
        _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps)
        _act_forward(ctx, x)
        ctx.var = var
        ctx.save_for_backward(x, var, weight, bias)
        return x
    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        (z, var, weight, bias) = ctx.saved_tensors
        dz = dz.contiguous()
        _act_backward(ctx, z, dz)
        if ctx.training:
            (edz, eydz) = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps)
            # Keep local (un-reduced) copies for the parameter gradients.
            edz_local = edz.clone()
            eydz_local = eydz.clone()
            if (ctx.world_size > 1):
                edz *= ctx.factor
                dist.all_reduce(edz, dist.ReduceOp.SUM)
                eydz *= ctx.factor
                dist.all_reduce(eydz, dist.ReduceOp.SUM)
        else:
            edz_local = edz = dz.new_zeros(dz.size(1))
            eydz_local = eydz = dz.new_zeros(dz.size(1))
        dx = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps)
        # sign(weight): presumably the backend works with |weight| internally
        # -- TODO confirm against the matching upstream backend version.
        dweight = ((eydz_local * weight.sign()) if ctx.affine else None)
        dbias = (edz_local if ctx.affine else None)
        return (dx, dweight, dbias, None, None, None, None, None, None, None)
|
class GlobalAvgPool2d(nn.Module):
    """Global average pooling over the input's spatial dimensions."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        batch, channels = inputs.size()[:2]
        # Flatten all spatial dims, then average them away -> (N, C).
        return inputs.view(batch, channels, -1).mean(dim=2)
|
class SingleGPU(nn.Module):
    """Wrapper that moves inputs onto the GPU before delegating to `module`."""

    def __init__(self, module):
        super(SingleGPU, self).__init__()
        self.module = module

    def forward(self, input):
        return self.module(input.cuda(non_blocking=True))
|
def _sum_ft(tensor):
'sum over the first and last dimention'
return tensor.sum(dim=0).sum(dim=(- 1))
|
def _unsqueeze_ft(tensor):
'add new dementions at the front and the tail'
return tensor.unsqueeze(0).unsqueeze((- 1))
|
class _SynchronizedBatchNorm(_BatchNorm):
    """Base class for cross-GPU synchronized batch normalization.

    Replicas created by DataParallel cooperate through a SyncMaster: the
    master copy (parallel id 0) gathers per-device sums, computes the global
    mean/inv-std, and broadcasts them back to every slave copy.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True):
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        # Master-side coordinator; slave copies get a pipe to it instead.
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None

    def forward(self, input):
        # Outside DataParallel (or in eval mode) fall back to the built-in op.
        if (not (self._is_parallel and self.training)):
            return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps)
        # Flatten all trailing spatial dims: (N, C, *) -> (N, C, L).
        input_shape = input.size()
        input = input.view(input.size(0), self.num_features, (- 1))
        # Number of elements contributing to each channel's statistics.
        sum_size = (input.size(0) * input.size(2))
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft((input ** 2))
        # Copy 0 reduces everyone's partial sums; other copies block until the
        # master sends back the synchronized statistics.
        if (self._parallel_id == 0):
            (mean, inv_std) = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            (mean, inv_std) = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # Normalize (and optionally scale/shift) with the global statistics.
        if self.affine:
            output = (((input - _unsqueeze_ft(mean)) * _unsqueeze_ft((inv_std * self.weight))) + _unsqueeze_ft(self.bias))
        else:
            output = ((input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std))
        return output.view(input_shape)

    def __data_parallel_replicate__(self, ctx, copy_id):
        # Invoked once per replica by execute_replication_callbacks; `ctx` is
        # shared across all replicas of this particular sub-module.
        self._is_parallel = True
        self._parallel_id = copy_id
        if (self._parallel_id == 0):
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)

    def _data_parallel_master(self, intermediates):
        'Reduce the sum and square-sum, compute the statistics, and broadcast it.'
        # Sort messages by device id so results map back to the right replica.
        intermediates = sorted(intermediates, key=(lambda i: i[1].sum.get_device()))
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        (sum_, ssum) = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        (mean, inv_std) = self._compute_mean_std(sum_, ssum, sum_size)
        # Broadcast returns (mean, inv_std) interleaved per target device.
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        outputs = []
        for (i, rec) in enumerate(intermediates):
            outputs.append((rec[0], _MasterMessage(*broadcasted[(i * 2):((i * 2) + 2)])))
        return outputs

    def _compute_mean_std(self, sum_, ssum, size):
        """Compute mean and inverse std from sum / square-sum, and update the
        running statistics (executed on the master device only)."""
        assert (size > 1), 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = (sum_ / size)
        sumvar = (ssum - (sum_ * mean))
        # Unbiased variance feeds the running estimate; biased variance
        # normalizes the current batch (matching torch.nn.BatchNorm semantics).
        unbias_var = (sumvar / (size - 1))
        bias_var = (sumvar / size)
        self.running_mean = (((1 - self.momentum) * self.running_mean) + (self.momentum * mean.data))
        self.running_var = (((1 - self.momentum) * self.running_var) + (self.momentum * unbias_var.data))
        # inv_std = (max(var, eps)) ** -0.5; clamp guards against zero variance.
        return (mean, (bias_var.clamp(self.eps) ** (- 0.5)))
|
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    """Synchronized Batch Normalization over a 2d or 3d input.

    Identical to :class:`torch.nn.BatchNorm1d`, except that while training
    under ``nn.DataParallel`` the batch statistics are reduced across all
    devices instead of being computed per device. With a single GPU (or on
    CPU) it behaves exactly like the built-in implementation.

    Args:
        num_features: C from an expected input of size (N, C) or (N, C, L).
        eps: value added to the denominator for numerical stability. Default: 1e-5.
        momentum: value used for the running mean/var computation. Default: 0.1.
        affine: when ``True``, adds learnable scale and shift. Default: ``True``.

    Shape:
        - Input: (N, C) or (N, C, L)
        - Output: same shape as input.
    """

    def _check_input_dim(self, input):
        dim = input.dim()
        if dim not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(dim))
        super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
|
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    """Synchronized Batch Normalization over a 4d input (N, C, H, W).

    Identical to :class:`torch.nn.BatchNorm2d`, except that while training
    under ``nn.DataParallel`` the batch statistics are reduced across all
    devices instead of being computed per device. With a single GPU (or on
    CPU) it behaves exactly like the built-in implementation.

    Args:
        num_features: C from an expected input of size (N, C, H, W).
        eps: value added to the denominator for numerical stability. Default: 1e-5.
        momentum: value used for the running mean/var computation. Default: 0.1.
        affine: when ``True``, adds learnable scale and shift. Default: ``True``.

    Shape:
        - Input: (N, C, H, W)
        - Output: same shape as input.
    """

    def _check_input_dim(self, input):
        dim = input.dim()
        if dim != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(dim))
        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
|
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    """Synchronized Batch Normalization over a 5d input (N, C, D, H, W).

    Identical to :class:`torch.nn.BatchNorm3d`, except that while training
    under ``nn.DataParallel`` the batch statistics are reduced across all
    devices instead of being computed per device. With a single GPU (or on
    CPU) it behaves exactly like the built-in implementation.

    Args:
        num_features: C from an expected input of size (N, C, D, H, W).
        eps: value added to the denominator for numerical stability. Default: 1e-5.
        momentum: value used for the running mean/var computation. Default: 0.1.
        affine: when ``True``, adds learnable scale and shift. Default: ``True``.

    Shape:
        - Input: (N, C, D, H, W)
        - Output: same shape as input.
    """

    def _check_input_dim(self, input):
        dim = input.dim()
        if dim != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(dim))
        super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
|
class FutureResult(object):
    """A thread-safe future implementation. Used only as a one-to-one pipe.

    Bug fix: ``Condition.wait`` was previously guarded by ``if``; a spurious
    wakeup could then make ``get`` return ``None`` before ``put`` ran. The
    wait is now re-checked in a loop, as the threading docs require.
    """

    def __init__(self):
        self._result = None
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock)

    def put(self, result):
        """Store one result; asserts that the previous one was consumed."""
        with self._lock:
            assert self._result is None, "Previous result hasn't been fetched."
            self._result = result
            self._cond.notify()

    def get(self):
        """Block until a result is available, then return and clear it."""
        with self._lock:
            while self._result is None:
                self._cond.wait()
            res = self._result
            self._result = None
            return res
|
class SlavePipe(_SlavePipeBase):
    """Pipe used by a slave device to exchange one message with the master."""

    def run_slave(self, msg):
        # Hand our message to the master, wait for the computed reply, then
        # acknowledge so the master can verify the queue has been drained.
        self.queue.put((self.identifier, msg))
        reply = self.result.get()
        self.queue.put(True)
        return reply
|
class SyncMaster(object):
    """Coordinator for master/slave synchronization across module replicas.

    - During replication each slave device calls `register_slave(id)` and gets
      a `SlavePipe` for talking to the master.
    - During the forward pass the master invokes `run_master`; messages from
      all slave devices are collected and passed to a registered callback.
    - The callback's results are then distributed back, one per device.
    """

    def __init__(self, master_callback):
        """
        Args:
            master_callback: callback invoked after all slave messages are collected.
        """
        self._master_callback = master_callback
        self._queue = queue.Queue()
        self._registry = collections.OrderedDict()
        self._activated = False

    def __getstate__(self):
        # Queues/futures are not picklable; only the callback survives pickling.
        return {'master_callback': self._master_callback}

    def __setstate__(self, state):
        self.__init__(state['master_callback'])

    def register_slave(self, identifier):
        """Register a slave device and return its `SlavePipe`.

        Args:
            identifier: an identifier, usually the device id.
        """
        if self._activated:
            # A new replication round invalidates the previous registry.
            assert self._queue.empty(), 'Queue is not clean before next initialization.'
            self._activated = False
            self._registry.clear()
        future = FutureResult()
        self._registry[identifier] = _MasterRegistry(future)
        return SlavePipe(identifier, self._queue, future)

    def run_master(self, master_msg):
        """Collect one message per device (master first), run the callback,
        distribute each device's reply, and return the master's own reply.
        """
        self._activated = True
        intermediates = [(0, master_msg)]
        for i in range(self.nr_slaves):
            intermediates.append(self._queue.get())
        results = self._master_callback(intermediates)
        assert (results[0][0] == 0), 'The first result should belongs to the master.'
        for (i, res) in results:
            if (i == 0):
                continue
            self._registry[i].result.put(res)
        # Each slave acknowledges with `True` after reading its result; this
        # guarantees the queue is fully drained before the next round.
        for i in range(self.nr_slaves):
            assert (self._queue.get() is True)
        return results[0][1]

    @property
    def nr_slaves(self):
        # Number of registered slave devices.
        return len(self._registry)
|
class CallbackContext(object):
    """Empty attribute bag shared between all replicas of one sub-module."""
    pass
|
def execute_replication_callbacks(modules):
    """Invoke `__data_parallel_replicate__(ctx, copy_id)` on every sub-module
    of every replica produced by the original `replicate` function.

    All replicas are isomorphic, so the j-th sub-module of each copy shares
    the j-th context object; because the master copy (index 0) is visited
    first, its callback is guaranteed to run before any slave copy's.
    """
    num_submodules = len(list(modules[0].modules()))
    shared_ctxs = [CallbackContext() for _ in range(num_submodules)]
    for copy_id, replica in enumerate(modules):
        for ctx, submodule in zip(shared_ctxs, replica.modules()):
            callback = getattr(submodule, '__data_parallel_replicate__', None)
            if callback is not None:
                callback(ctx, copy_id)
|
class DataParallelWithCallback(DataParallel):
    """`DataParallel` variant that runs each module's
    `__data_parallel_replicate__(ctx, copy_id)` callback right after the
    replicas are created.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """

    def replicate(self, module, device_ids):
        replicas = super(DataParallelWithCallback, self).replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas
|
def patch_replication_callback(data_parallel):
    """Monkey-patch an existing `DataParallel` instance so that replication
    triggers the `__data_parallel_replicate__` callbacks.

    Equivalent to constructing a `DataParallelWithCallback` in the first
    place; useful when the `DataParallel` object already exists.
    """
    assert isinstance(data_parallel, DataParallel)
    original_replicate = data_parallel.replicate

    @functools.wraps(original_replicate)
    def replicate_with_callbacks(module, device_ids):
        replicas = original_replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas

    data_parallel.replicate = replicate_with_callbacks
|
def as_numpy(v):
    """Convert a tensor (or autograd `Variable`) to a CPU numpy array."""
    data = v.data if isinstance(v, Variable) else v
    return data.cpu().numpy()
|
class TorchTestCase(unittest.TestCase):
    """Test-case base class providing a tensor-closeness assertion."""

    def assertTensorClose(self, a, b, atol=0.001, rtol=0.001):
        """Assert that `a` and `b` are element-wise close.

        Bug fix: `rtol` was accepted but never forwarded to `np.allclose`,
        so the relative tolerance silently stayed at numpy's default 1e-5.
        """
        (npa, npb) = (as_numpy(a), as_numpy(b))
        self.assertTrue(
            np.allclose(npa, npb, atol=atol, rtol=rtol),
            'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(
                a, b,
                np.abs(npa - npb).max(),
                np.abs((npa - npb) / np.fmax(npa, 1e-05)).max()))
|
class Path(object):
    """Lookup table mapping dataset names to their root directories."""

    # Root directory for each supported dataset.
    _ROOTS = {
        'pascal': '/path/to/datasets/VOCdevkit/VOC2012/',
        'sbd': '/path/to/datasets/benchmark_RELEASE/',
        'cityscapes': '/path/to/datasets/cityscapes/',
        'coco': '/path/to/datasets/coco/',
    }

    @staticmethod
    def db_root_dir(dataset):
        """Return the root directory for `dataset`; raise NotImplementedError
        (after printing a notice) for unknown names."""
        try:
            return Path._ROOTS[dataset]
        except KeyError:
            print('Dataset {} not available.'.format(dataset))
            raise NotImplementedError
|
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling head.

    Five parallel branches — a 1x1 conv, three dilated 3x3 convs, and global
    average pooling followed by a 1x1 conv — are concatenated and fused by a
    final 1x1 conv + norm.
    """

    def __init__(self, C, depth, num_classes, conv=nn.Conv2d, norm=NaiveBN, momentum=0.0003, mult=1):
        super(ASPP, self).__init__()
        self._C = C
        self._depth = depth
        self._num_classes = num_classes
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        # Dilation rates scale with `mult` (e.g. for different output strides).
        d1, d2, d3 = int(6 * mult), int(12 * mult), int(18 * mult)
        self.aspp1 = conv(C, depth, kernel_size=1, stride=1, bias=False)
        self.aspp2 = conv(C, depth, kernel_size=3, stride=1, dilation=d1, padding=d1, bias=False)
        self.aspp3 = conv(C, depth, kernel_size=3, stride=1, dilation=d2, padding=d2, bias=False)
        self.aspp4 = conv(C, depth, kernel_size=3, stride=1, dilation=d3, padding=d3, bias=False)
        self.aspp5 = conv(C, depth, kernel_size=1, stride=1, bias=False)
        self.aspp1_bn = norm(depth, momentum)
        self.aspp2_bn = norm(depth, momentum)
        self.aspp3_bn = norm(depth, momentum)
        self.aspp4_bn = norm(depth, momentum)
        self.aspp5_bn = norm(depth, momentum)
        self.conv2 = conv(depth * 5, depth, kernel_size=1, stride=1, bias=False)
        self.bn2 = norm(depth, momentum)
        self._init_weight()

    def forward(self, x):
        branches = [
            self.aspp1_bn(self.aspp1(x)),
            self.aspp2_bn(self.aspp2(x)),
            self.aspp3_bn(self.aspp3(x)),
            self.aspp4_bn(self.aspp4(x)),
        ]
        # Image-level features: pool to 1x1, project, upsample back to x's size.
        pooled = self.aspp5_bn(self.aspp5(self.global_pooling(x)))
        upsample = nn.Upsample((x.shape[2], x.shape[3]), mode='bilinear', align_corners=True)
        branches.append(upsample(pooled))
        fused = self.conv2(torch.cat(branches, 1))
        return self.bn2(fused)

    def _init_weight(self):
        # Kaiming init for convs; BN starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
|
class Retrain_Autodeeplab(nn.Module):
    """Auto-DeepLab network rebuilt from a searched architecture for
    retraining: searched encoder + ASPP head + decoder."""

    def __init__(self, args, input_channels=3):
        super(Retrain_Autodeeplab, self).__init__()
        # Maps the encoder's downsampling level to a channel multiplier.
        filter_param_dict = {0: 1, 1: 2, 2: 4, 3: 8}
        BatchNorm2d = (ABN if args.use_ABN else NaiveBN)
        # Only print from the master rank when running distributed.
        if (((not args.dist) and args.use_ABN) or (args.dist and args.use_ABN and (dist.get_rank() == 0))):
            print('=> use ABN!')
        if ((args.net_arch is not None) and (args.cell_arch is not None)):
            # NOTE(review): this branch looks broken — it binds `net_arch`, but
            # the code below reads `network_arch` and `network_path`, which are
            # only defined in the else-branch. Passing --net_arch/--cell_arch
            # therefore raises NameError. Confirm the intended file layout
            # (where `network_path` should come from) and fix.
            (net_arch, cell_arch) = (np.load(args.net_arch), np.load(args.cell_arch))
        else:
            network_arch = np.load(os.path.join(args.exp, 'network_path_space.npy'))
            cell_arch = np.load(os.path.join(args.exp, 'genotype.npy'))
            network_path = np.load(os.path.join(args.exp, 'network_path.npy'))
        self.encoder = newModel(network_arch, cell_arch, args.num_classes, 12, args.filter_multiplier, BatchNorm=BatchNorm2d, args=args, input_channels=input_channels)
        # ASPP input width depends on the channel multiplier at the last level.
        self.aspp = ASPP(((args.filter_multiplier * args.block_multiplier) * filter_param_dict[network_path[(- 1)]]), 256, args.num_classes, conv=nn.Conv2d, norm=BatchNorm2d)
        self.decoder = Decoder(args.num_classes, filter_multiplier=((args.filter_multiplier * args.block_multiplier) * filter_param_dict[network_path[2]]), args=args, last_level=network_path[(- 1)])

    def forward(self, x):
        (encoder_output, low_level_feature) = self.encoder(x)
        high_level_feature = self.aspp(encoder_output)
        decoder_output = self.decoder(high_level_feature, low_level_feature)
        # Upsample the logits back to the input resolution.
        return nn.Upsample((x.shape[2], x.shape[3]), mode='bilinear', align_corners=True)(decoder_output)

    def get_params(self):
        """Split parameters into (weight-decayed, non-decayed) groups; the
        encoder's BN parameters are excluded from weight decay."""
        (back_bn_params, back_no_bn_params) = self.encoder.get_params()
        tune_wd_params = ((list(self.aspp.parameters()) + list(self.decoder.parameters())) + back_no_bn_params)
        no_tune_wd_params = back_bn_params
        return (tune_wd_params, no_tune_wd_params)
|
class Decoder(nn.Module):
    """DeepLab-v3+ style decoder: project low-level features to 48 channels,
    upsample the ASPP output to match, concatenate, and refine to logits."""

    def __init__(self, num_classes, filter_multiplier, BatchNorm=NaiveBN, args=None, last_level=0):
        super(Decoder, self).__init__()
        low_level_inplanes = filter_multiplier
        C_low = 48
        # 1x1 projection of the low-level (high-resolution) feature map.
        self.conv1 = nn.Conv2d(low_level_inplanes, C_low, 1, bias=False)
        self.bn1 = BatchNorm(48)
        # 304 = 256 (ASPP depth) + 48 (projected low-level channels).
        self.last_conv = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
            BatchNorm(256),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            BatchNorm(256),
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
        self._init_weight()

    def forward(self, x, low_level_feat):
        low = self.bn1(self.conv1(low_level_feat))
        upsampled = F.interpolate(x, size=low.size()[2:], mode='bilinear', align_corners=True)
        return self.last_conv(torch.cat((upsampled, low), dim=1))

    def _init_weight(self):
        # Kaiming init for convs; BN starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
|
class TensorboardSummary(object):
    """Thin factory around tensorboard's SummaryWriter plus image logging."""

    def __init__(self, directory):
        self.directory = directory

    def create_summary(self):
        """Create a SummaryWriter logging into `self.directory`."""
        return SummaryWriter(log_dir=os.path.join(self.directory))

    def visualize_image(self, writer, dataset, image, target, output, global_step):
        """Log the first three inputs, predictions, and ground-truth maps."""
        inputs_grid = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
        writer.add_image('Image', inputs_grid, global_step)
        pred_maps = decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(), dataset=dataset)
        pred_grid = make_grid(pred_maps, 3, normalize=False, range=(0, 255))
        writer.add_image('Predicted label', pred_grid, global_step)
        gt_maps = decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(), dataset=dataset)
        gt_grid = make_grid(gt_maps, 3, normalize=False, range=(0, 255))
        writer.add_image('Groundtruth label', gt_grid, global_step)
|
def calculate_weigths_labels(dataset, dataloader, num_classes):
    """Compute per-class balancing weights 1 / log(1.02 + frequency) over a
    whole dataloader and cache them to <dataset>_classes_weights.npy."""
    class_counts = np.zeros((num_classes,))
    tqdm_batch = tqdm(dataloader)
    print('Calculating classes weights')
    for sample in tqdm_batch:
        labels = sample['label'].detach().cpu().numpy()
        # Count only labels inside the valid class range (ignores 255 etc.).
        valid = (labels >= 0) & (labels < num_classes)
        class_counts += np.bincount(labels[valid].astype(np.uint8), minlength=num_classes)
    tqdm_batch.close()
    total_frequency = np.sum(class_counts)
    ret = np.array([1 / np.log(1.02 + (count / total_frequency)) for count in class_counts])
    classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset + '_classes_weights.npy')
    np.save(classes_weights_path, ret)
    return ret
|
def copy_state_dict(cur_state_dict, pre_state_dict, prefix=''):
    """Copy matching parameters from `pre_state_dict` into `cur_state_dict`
    in place, looking each key up as `prefix + key`.

    Missing or incompatible parameters are reported and skipped, so a partial
    checkpoint never aborts the whole copy.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt /
    SystemExit) is narrowed to ``except Exception``, and the try body is
    reduced to the one statement that can legitimately fail.
    """
    def _get_params(key):
        # None signals "not present in the source state dict".
        return pre_state_dict.get(prefix + key)

    for k in cur_state_dict.keys():
        v = _get_params(k)
        if v is None:
            print('parameter {} not found'.format(k))
            continue
        try:
            cur_state_dict[k].copy_(v)
        except Exception:
            # Shape/dtype mismatch (or non-tensor entry): report and move on.
            print('copy param {} failed'.format(k))
            continue
|
def setup_logger(logpth):
    """Configure root logging to a timestamped file under `logpth`, echoing
    records to the console; non-master distributed ranks log WARNING+ only."""
    filename = 'Deeplab_v3plus-{}.log'.format(time.strftime('%Y-%m-%d-%H-%M-%S'))
    logfile = osp.join(logpth, filename)
    level = logging.INFO
    if dist.is_initialized() and dist.get_rank() != 0:
        # Avoid duplicated INFO spam from worker processes.
        level = logging.WARNING
    logging.basicConfig(level=level,
                        format='%(levelname)s %(filename)s(%(lineno)d): %(message)s',
                        filename=logfile)
    logging.root.addHandler(logging.StreamHandler())
|
class Logger(object):
    """Minimal logger that prints to stdout and mirrors saved lines into
    <args.save_path>/<logger_str>.txt."""

    def __init__(self, args, logger_str):
        self._logger_name = args.save_path
        self._logger_str = logger_str
        self._save_path = os.path.join(self._logger_name, self._logger_str + '.txt')
        self._file = open(self._save_path, 'w')

    def log(self, string, save=True):
        """Print `string`; when `save` is true, also append it to the file."""
        print(string)
        if save:
            self._file.write('{:}\n'.format(string))
            self._file.flush()

    def close(self):
        self._file.close()
|
class SegmentationLosses(object):
    """Factory for 2D segmentation losses (cross-entropy and focal loss)."""

    def __init__(self, weight=None, size_average=True, batch_average=True, ignore_index=255, cuda=False):
        self.ignore_index = ignore_index
        self.weight = weight
        self.size_average = size_average
        # Bug fix: `batch_average` was accepted but never stored, so
        # FocalLoss crashed with AttributeError on `self.batch_average`.
        self.batch_average = batch_average
        self.cuda = cuda

    def build_loss(self, mode='ce'):
        "Choices: ['ce' or 'focal']"
        if mode == 'ce':
            return self.CrossEntropyLoss
        elif mode == 'focal':
            return self.FocalLoss
        else:
            raise NotImplementedError

    def CrossEntropyLoss(self, logit, target):
        """Mean cross-entropy over all non-ignored pixels."""
        (n, c, h, w) = logit.size()
        criterion = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index, reduction='mean')
        if self.cuda:
            criterion = criterion.cuda()
        return criterion(logit, target.long())

    def FocalLoss(self, logit, target, gamma=2, alpha=0.5):
        """Focal loss: (1 - pt)^gamma * alpha-scaled cross-entropy, optionally
        averaged over the batch dimension."""
        (n, c, h, w) = logit.size()
        criterion = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index, reduction='mean')
        if self.cuda:
            criterion = criterion.cuda()
        logpt = -criterion(logit, target.long())
        # pt is computed before the alpha scaling, matching the original flow.
        pt = torch.exp(logpt)
        if alpha is not None:
            logpt *= alpha
        loss = -((1 - pt) ** gamma) * logpt
        if self.batch_average:
            loss /= n
        return loss
|
class OhemCELoss(nn.Module):
    """Online Hard Example Mining cross-entropy: only the hardest pixels
    (ground-truth probability below a threshold, but at least `n_min` of
    them) contribute to the loss."""

    def __init__(self, thresh, n_min, ignore_index=255, cuda=False, *args, **kwargs):
        super(OhemCELoss, self).__init__()
        # Pixels whose predicted probability for the true class exceeds
        # `thresh` are considered easy and dropped (subject to n_min).
        self.thresh = thresh
        self.n_min = n_min
        self.ignore_lb = ignore_index
        self.criteria = nn.CrossEntropyLoss(ignore_index=ignore_index)
        if cuda:
            self.criteria = self.criteria.cuda()

    def forward(self, logits, labels):
        (N, C, H, W) = logits.size()
        n_pixs = ((N * H) * W)
        # Flatten to (N*H*W, C) logits and (N*H*W,) labels.
        logits = logits.permute(0, 2, 3, 1).contiguous().view((- 1), C)
        labels = labels.view((- 1))
        with torch.no_grad():
            scores = F.softmax(logits, dim=1)
            # NOTE(review): `labels_cpu` merely aliases `labels`, so the
            # writes below mutate the caller's label tensor in place; the
            # clone() at the end happens after mutation and does not protect
            # the caller — confirm whether callers rely on labels being intact.
            labels_cpu = labels
            invalid_mask = (labels_cpu == self.ignore_lb)
            # Temporarily map ignored pixels to class 0 so indexing is valid.
            labels_cpu[invalid_mask] = 0
            # Predicted probability of the ground-truth class per pixel.
            picks = scores[(torch.arange(n_pixs), labels_cpu)]
            # Ignored pixels get score 1 so they sort as "easy" and are dropped.
            picks[invalid_mask] = 1
            (sorteds, _) = torch.sort(picks)
            # Keep at least n_min pixels: raise the cut-off to the n_min-th
            # score when that score is already above self.thresh.
            thresh = (self.thresh if (sorteds[self.n_min] < self.thresh) else sorteds[self.n_min])
            # Mark easy pixels as ignore so CrossEntropyLoss skips them.
            labels[(picks > thresh)] = self.ignore_lb
        labels = labels.clone()
        loss = self.criteria(logits, labels)
        return loss
|
def build_criterion(args):
    """Build the training criterion selected by `args.criterion`.

    Supported values: 'Ohem' (online hard-example-mining CE) and
    'crossentropy' (plain or focal CE via SegmentationLosses).

    Raises:
        ValueError: for any other criterion name.
    """
    # Message typo fixed: 'Trying bulid {:}loss' -> 'Trying build {:} loss'.
    print('=> Trying build {:} loss'.format(args.criterion))
    if args.criterion == 'Ohem':
        return OhemCELoss(thresh=args.thresh, n_min=args.n_min, cuda=True)
    elif args.criterion == 'crossentropy':
        return SegmentationLosses(weight=args.weight, cuda=True).build_loss(args.mode)
    else:
        raise ValueError('unknown criterion : {:}'.format(args.criterion))
|
class LR_Scheduler(object):
    """Learning rate scheduler called once per iteration.

    Modes:
        step:   ``lr = baselr * 0.1 ^ (epoch // lr_step)``
        cos:    ``lr = baselr * 0.5 * (1 + cos(iter / maxiter * pi))``
        poly:   ``lr = baselr * (1 - iter / maxiter) ^ 0.9``

    Args:
        mode: one of 'cos', 'poly', 'step'.
        base_lr: base learning rate.
        num_epochs: total number of epochs.
        iters_per_epoch: iterations per epoch (used to compute total iters).
        lr_step: epoch interval for 'step' mode (required for that mode).
        warmup_epochs: epochs of linear warmup at the start.
        min_lr: optional lower bound for the learning rate.

    Bug fix: the unknown-mode branch used ``raise NotImplemented``, which
    raises TypeError (NotImplemented is a sentinel value, not an exception);
    it now raises NotImplementedError.
    """

    def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0, lr_step=0, warmup_epochs=0, min_lr=None):
        self.mode = mode
        print('Using {} LR Scheduler!'.format(self.mode))
        self.lr = base_lr
        if mode == 'step':
            assert lr_step
        self.lr_step = lr_step
        self.iters_per_epoch = iters_per_epoch
        self.N = num_epochs * iters_per_epoch
        self.epoch = -1
        self.warmup_iters = warmup_epochs * iters_per_epoch
        self.min_lr = min_lr

    def __call__(self, optimizer, i, epoch, best_pred):
        # Global iteration index.
        T = epoch * self.iters_per_epoch + i
        if self.mode == 'cos':
            lr = 0.5 * self.lr * (1 + math.cos(1.0 * T / self.N * math.pi))
        elif self.mode == 'poly':
            lr = self.lr * pow(1 - 1.0 * T / self.N, 0.9)
        elif self.mode == 'step':
            lr = self.lr * (0.1 ** (epoch // self.lr_step))
        else:
            raise NotImplementedError
        if self.min_lr is not None and lr < self.min_lr:
            lr = self.min_lr
        # Linear warmup over the first warmup_iters iterations.
        if self.warmup_iters > 0 and T < self.warmup_iters:
            lr = lr * 1.0 * T / self.warmup_iters
        if epoch > self.epoch:
            print('\n=>Epoches %i, learning rate = %.4f, previous best = %.4f' % (epoch, lr, best_pred))
            self.epoch = epoch
        assert lr >= 0
        self._adjust_learning_rate(optimizer, lr)

    def _adjust_learning_rate(self, optimizer, lr):
        # Group 0 (backbone) gets `lr`; any additional groups get 10x.
        if len(optimizer.param_groups) == 1:
            optimizer.param_groups[0]['lr'] = lr
        else:
            optimizer.param_groups[0]['lr'] = lr
            for i in range(1, len(optimizer.param_groups)):
                optimizer.param_groups[i]['lr'] = lr * 10
|
class Evaluator(object):
    """Accumulates a confusion matrix and derives segmentation metrics."""

    def __init__(self, num_class):
        self.num_class = num_class
        self.confusion_matrix = np.zeros((self.num_class,) * 2)

    def Pixel_Accuracy(self):
        """Overall fraction of correctly classified pixels."""
        return np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()

    def Pixel_Accuracy_Class(self):
        """Mean per-class accuracy (recall), ignoring absent classes."""
        per_class = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
        return np.nanmean(per_class)

    def Mean_Intersection_over_Union(self):
        """Mean IoU over classes, ignoring classes with empty union."""
        cm = self.confusion_matrix
        intersection = np.diag(cm)
        union = cm.sum(axis=1) + cm.sum(axis=0) - intersection
        return np.nanmean(intersection / union)

    def Frequency_Weighted_Intersection_over_Union(self):
        """IoU weighted by each class's ground-truth pixel frequency."""
        cm = self.confusion_matrix
        freq = cm.sum(axis=1) / cm.sum()
        intersection = np.diag(cm)
        iu = intersection / (cm.sum(axis=1) + cm.sum(axis=0) - intersection)
        return (freq[freq > 0] * iu[freq > 0]).sum()

    def _generate_matrix(self, gt_image, pre_image):
        # Encode each valid (gt, pred) pair as gt*num_class + pred, then
        # histogram into a num_class x num_class matrix.
        mask = (gt_image >= 0) & (gt_image < self.num_class)
        label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
        count = np.bincount(label, minlength=self.num_class ** 2)
        return count.reshape(self.num_class, self.num_class)

    def add_batch(self, gt_image, pre_image):
        """Update the confusion matrix from one batch of label maps."""
        assert gt_image.shape == pre_image.shape
        self.confusion_matrix += self._generate_matrix(gt_image, pre_image)

    def reset(self):
        """Zero the accumulated confusion matrix."""
        self.confusion_matrix = np.zeros((self.num_class,) * 2)
|
class Optimizer(object):
    """SGD wrapper implementing exponential warmup followed by poly decay."""

    def __init__(self, model, lr0, momentum, wd, warmup_steps, warmup_start_lr, max_iter, power):
        self.warmup_steps = warmup_steps
        self.warmup_start_lr = warmup_start_lr
        self.lr0 = lr0
        self.lr = self.lr0
        self.max_iter = float(max_iter)
        self.power = power
        self.it = 0
        self.optim = torch.optim.SGD(model.parameters(), lr=lr0, momentum=momentum, weight_decay=wd)
        # Per-step multiplier growing warmup_start_lr to lr0 in warmup_steps steps.
        self.warmup_factor = (self.lr0 / self.warmup_start_lr) ** (1.0 / self.warmup_steps)

    def get_lr(self):
        """Learning rate for the current iteration counter `self.it`."""
        if self.it <= self.warmup_steps:
            return self.warmup_start_lr * (self.warmup_factor ** self.it)
        progress = (self.it - self.warmup_steps) / (self.max_iter - self.warmup_steps)
        return self.lr0 * ((1 - progress) ** self.power)

    def step(self):
        """Refresh every param group's lr, then take one SGD step."""
        self.lr = self.get_lr()
        for pg in self.optim.param_groups:
            pg['lr'] = self.lr
        self.optim.defaults['lr'] = self.lr
        self.it += 1
        self.optim.step()
        if self.it == self.warmup_steps + 2 and dist.get_rank() == 0:
            logger.info('==> warmup done, start to implement poly lr strategy')

    def load_state_dict(self, optimizer, iteration):
        """Restore the inner optimizer state and optionally the iteration."""
        self.it = iteration if iteration is not None else 0
        self.optim.load_state_dict(optimizer)

    def zero_grad(self):
        self.optim.zero_grad()
|
class Saver(object):
    """Manages run/<dataset>/<checkname>/experiment_N directories: checkpoint
    saving, best-model tracking, and experiment-config dumps."""

    def __init__(self, args, use_dist=False):
        self.args = args
        self.use_dist = use_dist
        self.directory = os.path.join('run', args.dataset, args.checkname)
        self.runs = sorted(glob.glob(os.path.join(self.directory, 'experiment_*')))
        # Next free experiment index (0 when none exist yet).
        if self.runs:
            run_id = max(int(r.split('_')[-1]) for r in self.runs) + 1
        else:
            run_id = 0
        self.experiment_dir = os.path.join(self.directory, 'experiment_{}'.format(str(run_id)))
        if not os.path.exists(self.experiment_dir):
            os.makedirs(self.experiment_dir)

    def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):
        """Save a checkpoint; on a new best, also record the score and copy
        the file to model_best.pth.tar when it beats every previous run."""
        filename = os.path.join(self.experiment_dir, filename)
        # Only the master process writes when running distributed.
        if (not self.use_dist) or dist.get_rank() == 0:
            torch.save(state, filename)
            if is_best:
                best_pred = state['best_loss']
                with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'w') as f:
                    f.write(str(best_pred))
                if self.runs:
                    previous_miou = [0.0]
                    for run in self.runs:
                        run_id = run.split('_')[-1]
                        path = os.path.join(self.directory, 'experiment_{}'.format(str(run_id)), 'best_pred.txt')
                        if not os.path.exists(path):
                            continue
                        with open(path, 'r') as f:
                            previous_miou.append(float(f.readline()))
                    if best_pred > max(previous_miou):
                        shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))
                else:
                    shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))

    def save_experiment_config(self):
        """Write the key hyper-parameters into parameters.txt (master only)."""
        if (not self.use_dist) or dist.get_rank() == 0:
            logfile = os.path.join(self.experiment_dir, 'parameters.txt')
            log_file = open(logfile, 'w')
            p = OrderedDict()
            p['datset'] = self.args.dataset
            p['backbone'] = self.args.backbone
            p['out_stride'] = self.args.out_stride
            p['lr'] = self.args.lr
            p['lr_scheduler'] = self.args.lr_scheduler
            p['loss_type'] = self.args.loss_type
            p['epoch'] = self.args.epochs
            p['resize'] = self.args.resize
            p['crop_size'] = self.args.crop_size
            for key, val in p.items():
                log_file.write(key + ':' + str(val) + '\n')
            log_file.close()
|
class Iter_LR_Scheduler(object):
    """Iteration-based learning-rate scheduler with optional warmup.

    Modes (``args.mode``):
      - 'cos':  lr = base_lr * 0.5 * (1 + cos(iter / max_iter * pi))
      - 'poly': lr = base_lr * (1 - (iter - warmup) / (max_iter - warmup)) ** 0.9
      - 'step': lr = base_lr * 0.1 ** (epoch // lr_step)  (flagged as buggy upstream)

    During the first ``args.warmup_iters`` iterations the lr grows
    geometrically from ``args.warmup_start_lr`` up to ``args.base_lr``.

    Args:
        args: namespace providing ``mode``, ``base_lr``, ``lr_step``,
            ``warmup_iters``, ``min_lr`` and ``warmup_start_lr``.
        max_iteration: total number of training iterations.
        iters_per_epoch: number of iterations per epoch (used for logging
            and for the 'step' mode).
    """

    def __init__(self, args, max_iteration, iters_per_epoch):
        self.mode = args.mode
        print('Using {} LR Scheduler!'.format(self.mode))
        self.lr = args.base_lr
        self.lr_step = args.lr_step
        self.iters_per_epoch = iters_per_epoch
        self.max_iteration = max_iteration
        self.epoch = (- 1)  # last epoch for which the lr was logged
        self.warmup_iters = args.warmup_iters
        self.min_lr = (args.min_lr if (args.min_lr is not None) else 0)
        self.warmup_start_lr = args.warmup_start_lr
        # Per-iteration geometric growth factor so that after warmup_iters
        # steps the lr reaches base_lr.
        # NOTE(review): assumes args.warmup_iters > 0, else this divides by zero.
        self.warmup_factor = ((self.lr / args.warmup_start_lr) ** (1.0 / args.warmup_iters))

    def __call__(self, optimizer, iteration):
        """Compute the lr for ``iteration`` and set it on the first param group.

        Raises:
            NotImplementedError: for an unknown mode or a 'step' mode without lr_step.
        """
        if ((self.warmup_iters > 0) and (iteration < self.warmup_iters)):
            lr = (self.warmup_start_lr * (self.warmup_factor ** iteration))
        elif (self.mode == 'cos'):
            lr = ((0.5 * self.lr) * (1 + math.cos((((1.0 * iteration) / self.max_iteration) * math.pi))))
        elif (self.mode == 'poly'):
            lr = (self.lr * pow((1 - ((iteration - self.warmup_iters) / (self.max_iteration - self.warmup_iters))), 0.9))
        elif (self.mode == 'step'):
            print('Warning! Now the step decline lr exists some issue')
            if (not self.lr_step):
                raise NotImplementedError
            epoch = (iteration // self.iters_per_epoch)
            lr = (self.lr * (0.1 ** (epoch // self.lr_step)))
        else:
            # BUG FIX: the original did `raise NotImplemented`, which raises a
            # TypeError (NotImplemented is a constant, not an exception type).
            raise NotImplementedError
        if (iteration == self.warmup_iters):
            print('==> warmup done, start to implement poly lr strategy')
        # Log the lr once at the start of each epoch.
        if ((not (iteration % self.iters_per_epoch)) and ((iteration // self.iters_per_epoch) > self.epoch)):
            epoch = (iteration // self.iters_per_epoch)
            print(('\n=>Epoches %i, learning rate = %.4f' % (epoch, lr)))
            self.epoch = epoch
        optimizer.param_groups[0]['lr'] = max(lr, self.min_lr)

    def get_lr(self, optimizer):
        """Return the current lr of the first param group."""
        return optimizer.param_groups[0]['lr']
|
class TensorboardSummary(object):
    """Thin wrapper around SummaryWriter for segmentation visualization."""

    def __init__(self, directory, use_dist=False):
        self.directory = directory
        self.use_dist = use_dist

    def create_summary(self):
        """Create and return a SummaryWriter logging into ``self.directory``."""
        writer = SummaryWriter(logdir=os.path.join(self.directory))
        return writer

    def visualize_image(self, writer, dataset, image, target, output, global_step):
        """Log up to 3 input images, predicted label maps and ground-truth maps.

        Only rank 0 writes when running distributed.
        """
        if ((self.use_dist and (dist.get_rank() == 0)) or (not self.use_dist)):
            grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
            writer.add_image('Image', grid_image, global_step)
            # argmax over the class dimension turns logits into a label map
            # before decoding it to a color image.
            grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(), dataset=dataset), 3, normalize=False, range=(0, 255))
            writer.add_image('Predicted label', grid_image, global_step)
            grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(), dataset=dataset), 3, normalize=False, range=(0, 255))
            writer.add_image('Groundtruth label', grid_image, global_step)
|
class AverageMeter(object):
    """Tracks the latest value, running sum/count/average and a 0.99-decay EMA."""

    def __init__(self):
        self.val = None
        self.sum = None
        self.cnt = None
        self.avg = None
        self.ema = None
        self.initialized = False

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times."""
        if self.initialized:
            self.add(val, n)
        else:
            self.initialize(val, n)

    def initialize(self, val, n):
        """Seed every statistic from the first observation."""
        self.val = val
        self.sum = val * n
        self.cnt = n
        self.avg = val
        self.ema = val
        self.initialized = True

    def add(self, val, n):
        """Fold a subsequent observation into the running statistics."""
        self.val = val
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt
        # Exponential moving average with a fixed 0.99 decay.
        self.ema = self.ema * 0.99 + val * 0.01
|
def inter_and_union(pred, mask, num_class):
    """Per-class intersection and union pixel counts for segmentation IoU.

    Labels are shifted by +1 so that 0 marks ignored pixels; predictions at
    ignored positions are zeroed out and fall outside the histogram range.

    Returns:
        (area_inter, area_union): two length-``num_class`` arrays.
    """
    pred = np.asarray(pred, dtype=np.uint8).copy() + 1
    mask = np.asarray(mask, dtype=np.uint8).copy() + 1
    pred = pred * (mask > 0)
    inter = pred * (pred == mask)

    def _hist(values):
        # One bin per class covering the shifted label values 1..num_class.
        return np.histogram(values, bins=num_class, range=(1, num_class))[0]

    area_inter = _hist(inter)
    area_pred = _hist(pred)
    area_mask = _hist(mask)
    return (area_inter, area_pred + area_mask - area_inter)
|
def time_for_file():
    """Return the current GMT time formatted for embedding in file names."""
    # '%h' is the abbreviated month name, e.g. '05-Mar-at-14-02-33'.
    return time.strftime('%d-%h-at-%H-%M-%S', time.gmtime())
|
def prepare_seed(seed):
    """Seed torch (CPU and all CUDA devices), ``random`` and numpy for reproducibility."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, random.seed, np.random.seed):
        seeder(seed)
|
def adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[int], *, amsgrad: bool, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float):
    """Functional API that performs the Adam update; mutates ``params`` and the
    moment buffers in place.

    See :class:`~torch.optim.Adam` for details.
    """
    for (i, param) in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step = state_steps[i]
        # Bias corrections compensate for the zero initialization of the moments.
        bias_correction1 = (1 - (beta1 ** step))
        bias_correction2 = (1 - (beta2 ** step))
        if (weight_decay != 0):
            # Coupled weight decay: L2 penalty folded into the gradient.
            grad = grad.add(param, alpha=weight_decay)
        # First (mean) and second (uncentered variance) moment running averages.
        exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=(1 - beta2))
        if amsgrad:
            # AMSGrad: use the elementwise maximum of all second moments seen so far.
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
        step_size = (lr / bias_correction1)
        # param -= step_size * exp_avg / denom
        param.addcdiv_(exp_avg, denom, value=(- step_size))
|
class Adam(Optimizer):
    """Implements the Adam algorithm (optionally AMSGrad).

    Proposed in `Adam: A Method for Stochastic Optimization`
    (https://arxiv.org/abs/1412.6980); the L2 penalty follows
    `Decoupled Weight Decay Regularization` (https://arxiv.org/abs/1711.05101);
    AMSGrad comes from `On the Convergence of Adam and Beyond`
    (https://openreview.net/forum?id=ryQu7f-RZ).

    Args:
        params (iterable): parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients for the running
            averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional): denominator term for numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay / L2 penalty (default: 0)
        amsgrad (bool, optional): whether to use the AMSGrad variant (default: False)
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        # Validate hyper-parameters up front so errors surface at construction time.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Adam, self).__setstate__(state)
        # Older checkpoints may predate the amsgrad option.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            # Gather per-parameter state into parallel lists for the functional kernel.
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []
            (beta1, beta2) = group['betas']
            for p in group['params']:
                if (p.grad is not None):
                    params_with_grad.append(p)
                    if p.grad.is_sparse:
                        raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                    grads.append(p.grad)
                    state = self.state[p]
                    # Lazy state initialization on the first step for this parameter.
                    if (len(state) == 0):
                        state['step'] = 0
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])
                    if group['amsgrad']:
                        max_exp_avg_sqs.append(state['max_exp_avg_sq'])
                    state['step'] += 1
                    state_steps.append(state['step'])
            # Delegates the actual math to the functional `adam` defined in this module.
            adam(params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=group['amsgrad'], beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'])
        return loss
|
class BasicBlock(nn.Module):
    """Pre-activation residual block: (BN -> ReLU -> 3x3 conv) twice.

    Batch norm and ReLU are applied before each convolution; when the input
    and output widths differ, a strided 1x1 convolution projects the input
    for the residual connection.
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)

    def forward(self, x):
        preact = self.relu1(self.bn1(x))
        if not self.equalInOut:
            # When widths differ, the pre-activation feeds both branches.
            x = preact
        out = self.relu2(self.bn2(self.conv1(preact)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        shortcut = x if self.equalInOut else self.convShortcut(x)
        return torch.add(shortcut, out)
|
class NetworkBlock(nn.Module):
    """Sequential stack of ``nb_layers`` blocks; only the first block may
    change the channel count or apply the stride."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = []
        for idx in range(int(nb_layers)):
            first = idx == 0
            blocks.append(block(in_planes if first else out_planes, out_planes, stride if first else 1, dropRate))
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
|
class Backbone_Pt(nn.Module):
    """Wide ResNet feature extractor with a linear classifier head.

    ``depth`` must satisfy depth = 6n + 4; ``widen_factor`` scales the
    channel counts of the three residual stages.
    """

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, in_channels=3):
        super(Backbone_Pt, self).__init__()
        nChannels = [16, (16 * widen_factor), (32 * widen_factor), (64 * widen_factor)]
        assert (((depth - 4) % 6) == 0)
        # Blocks per stage (a float here; NetworkBlock casts it to int).
        n = ((depth - 4) / 6)
        block = BasicBlock
        self.conv1 = nn.Conv2d(in_channels, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        # Three residual stages; stages 2 and 3 halve the spatial resolution.
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He init for convolutions, identity init for BN, zero linear biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Global pooling; the special cases appear to match specific dataset
        # feature-map sizes (2x8, 4x13, 6xW) — TODO confirm which task hits each.
        if ((out.shape[2] == 2) and (out.shape[3] == 8)):
            out = F.avg_pool2d(out, (2, 8))
        elif ((out.shape[3] == 13) and (out.shape[2] == 4)):
            out = F.avg_pool2d(out, (4, 13))
        elif (out.shape[2] == 6):
            out = nn.AdaptiveAvgPool2d((1, 1))(out)
        else:
            out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), (- 1))
        return self.fc(out)
|
class Backbone_Audio(nn.Module):
    """Wide ResNet for audio: same trunk as Backbone_Pt but with an
    unconditional adaptive global average pool before the classifier."""

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, in_channels=3):
        super(Backbone_Audio, self).__init__()
        nChannels = [16, (16 * widen_factor), (32 * widen_factor), (64 * widen_factor)]
        assert (((depth - 4) % 6) == 0)
        # Blocks per stage (a float here; NetworkBlock casts it to int).
        n = ((depth - 4) / 6)
        block = BasicBlock
        self.conv1 = nn.Conv2d(in_channels, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He init for convolutions, identity init for BN, zero linear biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = self.avgpool(out)
        out = torch.flatten(out, 1)
        return self.fc(out)
|
def download_from_s3(s3_bucket, task, download_dir):
    """Download the raw data files for ``task`` from S3 into ``download_dir``.

    Files already present locally are not downloaded again.  For 'audio' the
    downloaded zip archive is extracted into ``download_dir/data``.

    Raises:
        NotImplementedError: for an unsupported task name.
    """
    s3 = boto3.client('s3')
    # BUG FIX: the original started with `if task == 'smnist': ...` followed by
    # a *new* `if/elif/else` chain, so 'smnist' fell through to the final
    # `else` and raised NotImplementedError.  Use a single elif chain.
    if (task == 'smnist'):
        data_files = ['s2_mnist.gz']
        s3_folder = 'spherical'
    elif (task == 'scifar100'):
        data_files = ['s2_cifar100.gz']
        s3_folder = 'spherical'
    elif (task == 'sEMG'):
        data_files = ['trainval_Myo.pt', 'test_Myo.pt']
        s3_folder = 'Myo'
    elif (task == 'ninapro'):
        data_files = ['ninapro_train.npy', 'ninapro_val.npy', 'ninapro_test.npy', 'label_train.npy', 'label_val.npy', 'label_test.npy']
        s3_folder = 'ninapro'
    elif ((task == 'cifar10') or (task == 'cifar100')):
        # CIFAR is fetched elsewhere (e.g. torchvision); nothing to download.
        return
    elif (task == 'audio'):
        data_files = ['audio.zip']
        s3_folder = 'audio'
    elif (task == 'EEG'):
        data_files = ['train_data.npy', 'train_labels.npy', 'test_data.npy', 'test_labels.npy']
        s3_folder = 'EEG'
    else:
        raise NotImplementedError
    for data_file in data_files:
        filepath = os.path.join(download_dir, data_file)
        if (s3_folder is not None):
            s3_path = os.path.join(s3_folder, data_file)
        else:
            s3_path = data_file
        if (not os.path.exists(filepath)):
            s3.download_file(s3_bucket, s3_path, filepath)
    # Audio ships as a zip; extract it once into download_dir/data.
    if ((task == 'audio') and (not os.path.exists(os.path.join(download_dir, 'data')))):
        os.mkdir(os.path.join(download_dir, 'data'))
        import zipfile
        with zipfile.ZipFile(os.path.join(download_dir, 'audio.zip'), 'r') as zip_ref:
            zip_ref.extractall(os.path.join(download_dir, 'data'))
    return
|
def download_protein_folder(bucket_name, local_dir=None):
    """Download every object under the 'protein' prefix of ``bucket_name``.

    Objects are written to their original keys when ``local_dir`` is None,
    otherwise under ``local_dir`` with the 'protein' prefix stripped.
    "Directory" placeholder keys (ending in '/') are skipped.
    """
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    for obj in bucket.objects.filter(Prefix='protein'):
        if (local_dir is None):
            target = obj.key
        else:
            target = os.path.join(local_dir, os.path.relpath(obj.key, 'protein'))
        parent = os.path.dirname(target)
        # BUG FIX: guard against an empty dirname (a key with no '/'), which made
        # os.makedirs('') raise; exist_ok also removes the exists/makedirs race.
        if parent:
            os.makedirs(parent, exist_ok=True)
        if (obj.key[(- 1)] == '/'):
            continue
        bucket.download_file(obj.key, target)
|
def accuracy_rate(predictions: torch.Tensor, labels: torch.Tensor) -> float:
    """Fraction of rows whose argmax matches the integer class label."""
    assert len(predictions) == len(labels), 'Predictions and labels must have the same length.'
    assert len(labels.shape) == 1, 'Labels must be a column vector.'
    correct = (predictions.argmax(1) == labels.to(torch.long)).sum()
    return float(correct) / predictions.shape[0]
|
class AttrDict(dict):
    """Dict whose items are also reachable as attributes (d.key == d['key'])."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Aliasing __dict__ to the mapping itself makes every key an attribute.
        self.__dict__ = self
|
class BackboneTrial(PyTorchTrial):
    """Determined PyTorchTrial training a Wide-ResNet backbone.

    The 'audio' task is multi-label (BCE loss, mAP/mAUC evaluation); all
    other tasks use cross-entropy.  Data comes from S3 via download_from_s3.
    """

    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        self.download_directory = self.download_data_from_s3()
        # (num_classes, input_channels) per supported task.
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3), 'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3), 'audio': (200, 1)}
        (n_classes, in_channels) = dataset_hypers[self.hparams.task]
        print('task: ', self.hparams.task, 'in_channels: ', in_channels, 'classes: ', n_classes)
        # hparams.backbone is a 'depth,width' string.
        depth = list(map(int, self.hparams.backbone.split(',')))[0]
        width = list(map(int, self.hparams.backbone.split(',')))[1]
        if (self.hparams.task == 'audio'):
            # Multi-label classification: sigmoid outputs with BCE loss.
            self.criterion = nn.BCEWithLogitsLoss().cuda()
            self.backbone = Backbone_Audio(depth, n_classes, width, dropRate=self.hparams.droprate, in_channels=in_channels)
        else:
            self.criterion = nn.CrossEntropyLoss().cuda()
            self.backbone = Backbone_Pt(depth, n_classes, width, dropRate=self.hparams.droprate, in_channels=in_channels)
        total_params = (sum((p.numel() for p in self.backbone.parameters() if p.requires_grad)) / 1000000.0)
        print('Parameter size in MB(backbone): ', total_params)
        self.model = self.context.wrap_model(self.backbone)
        # Counts evaluate_full_dataset calls so the audio test set is only
        # scored every 20th evaluation.
        self.last_eval = 0
        '\n        Definition of optimizer \n        '
        nesterov = (self.hparams.nesterov if self.hparams.momentum else False)
        self.opt = self.context.wrap_optimizer(torch.optim.SGD(self.model.parameters(), lr=self.hparams.learning_rate, momentum=self.hparams.momentum, weight_decay=self.hparams.weight_decay, nesterov=nesterov))
        self.lr_scheduler = self.context.wrap_lr_scheduler(lr_scheduler=torch.optim.lr_scheduler.LambdaLR(self.opt, lr_lambda=self.weight_sched, last_epoch=(self.hparams.start_epoch - 1)), step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH)

    def weight_sched(self, epoch) -> Any:
        """Step decay: multiply by 0.2 at 30%/60%/80% of training
        (or at epochs 60/120/160 when training exactly 200 epochs)."""
        if (self.hparams.epochs != 200):
            return (((0.2 ** (epoch >= int((0.3 * self.hparams.epochs)))) * (0.2 ** (epoch > int((0.6 * self.hparams.epochs))))) * (0.2 ** (epoch > int((0.8 * self.hparams.epochs)))))
        return (((0.2 ** (epoch >= 60)) * (0.2 ** (epoch >= 120))) * (0.2 ** (epoch >= 160)))

    def download_data_from_s3(self):
        'Download data from s3 to store in temp directory'
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = os.getcwd()
        s3 = boto3.client('s3')
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        # The 3-way split is only produced in train mode; otherwise the middle
        # element is discarded and validation doubles as the held-out set.
        if self.hparams.train:
            (self.train_data, self.val_data, self.test_data) = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
            self.build_test_data_loader(download_directory)
        else:
            (self.train_data, _, self.val_data) = load_data(self.hparams.task, download_directory, False, self.hparams.permute)
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        trainset = self.train_data
        print(len(trainset))
        # _collate_fn is the audio-specific collate defined elsewhere in the project.
        train_loader = DataLoader(trainset, num_workers=4, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, sampler=None, collate_fn=_collate_fn, pin_memory=False, drop_last=True)
        print(len(train_loader))
        return train_loader

    def build_validation_data_loader(self) -> DataLoader:
        valset = self.val_data
        print(len(valset))
        # batch_size=1: each "batch" holds the clips of one sample; logits are
        # averaged over clips in evaluate_full_dataset.
        return DataLoader(valset, sampler=None, num_workers=4, collate_fn=_collate_fn_eval, shuffle=False, batch_size=1, pin_memory=False)

    def build_test_data_loader(self, download_directory):
        # The test set is evaluated directly from self.test_data; no loader kept.
        testset = self.test_data
        print(len(testset))
        return
    '\n    Train and Evaluate Methods\n    '

    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
        """One optimization step; the middle element of the batch tuple is unused."""
        (x_train, _, y_train) = batch
        self.model.train()
        output = self.model(x_train)
        loss = self.criterion(output, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(self.opt)
        return {'loss': loss}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Score the validation set (loss + macro mAP); periodically also the audio test set."""
        if ((not self.hparams.train) and (self.hparams.task == 'audio')):
            return self.evaluate_audio_testset(self.val_data)
        loss_avg = utils_pt.AverageMeter()
        val_predictions = []
        val_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                # Average the clip-level logits into one sample-level prediction.
                logits = logits.mean(0).unsqueeze(0)
                loss = self.criterion(logits, target)
                loss_avg.update(loss, n)
                logits_sigmoid = torch.sigmoid(logits)
                val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
                val_gts.append(target.detach().cpu().numpy()[0])
        val_preds = np.asarray(val_predictions).astype('float32')
        val_gts = np.asarray(val_gts).astype('int32')
        map_value = average_precision_score(val_gts, val_preds, average='macro')
        results = {'loss': loss_avg.avg, 'val_mAP': map_value}
        '\n        if self.hparams.train:\n            test_acc_top1 = utils_pt.AverageMeter()\n            test_acc_top5 = utils_pt.AverageMeter()\n            test_loss = utils_pt.AverageMeter()\n            with torch.no_grad():\n                for batch in self.test_loader:\n                    batch = self.context.to_device(batch)\n                    input, target = batch\n                    n = input.size(0)\n                    logits = self.model(input)\n                    loss = self.criterion(logits, target)\n                    top1, top5 = utils_pt.accuracy(logits, target, topk=(1, 5))\n                    test_acc_top1.update(top1.item(), n)\n                    test_acc_top5.update(top5.item(), n)\n                    test_loss.update(loss, n)\n\n            results2 = {\n                "test_loss": test_loss.avg,\n                "test_top1_accuracy": test_acc_top1.avg,\n                "test_top5_accuracy": test_acc_top5.avg,\n            }\n\n            results.update(results2)\n        '
        # Full audio test-set scoring is expensive; only run it every 20 evals.
        if ((self.hparams.task == 'audio') and ((self.last_eval % 20) == 0)):
            results.update(self.evaluate_audio_testset(self.test_data))
        self.last_eval += 1
        return results

    def evaluate_audio_testset(self, testset) -> Dict[(str, torch.Tensor)]:
        """Compute macro mAP and mAUC over ``testset`` sample by sample."""
        cnt = 0
        test_predictions = []
        test_gts = []
        for ix in range(testset.len):
            with torch.no_grad():
                batch = testset[ix]
                (x, y) = batch
                x = x.cuda()
                y_pred = self.model(x)
                # Average clip-level predictions into one sample-level score.
                y_pred = y_pred.mean(0).unsqueeze(0)
                sigmoid_preds = torch.sigmoid(y_pred)
            test_predictions.append(sigmoid_preds.detach().cpu().numpy()[0])
            test_gts.append(y.detach().cpu().numpy()[0])
        test_predictions = np.asarray(test_predictions).astype('float32')
        test_gts = np.asarray(test_gts).astype('int32')
        # calculate_stats is a project helper returning per-class AP/AUC dicts.
        stats = calculate_stats(test_predictions, test_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {'test_mAUC': mAUC, 'test_mAP': mAP}
        return results
|
def accuracy_rate(predictions: torch.Tensor, labels: torch.Tensor) -> float:
    """Fraction of prediction rows whose argmax equals the integer label."""
    assert len(predictions) == len(labels), 'Predictions and labels must have the same length.'
    assert len(labels.shape) == 1, 'Labels must be a column vector.'
    hits = (predictions.argmax(1) == labels.to(torch.long)).sum()
    return float(hits) / predictions.shape[0]
|
class AttrDict(dict):
    """Dictionary with attribute-style access: ``d.key`` is ``d['key']``."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Point the instance __dict__ at the dict itself so keys double as attributes.
        self.__dict__ = self
|
class BackboneTrial(PyTorchTrial):
    """Determined PyTorchTrial training a Wide-ResNet backbone with
    cross-entropy on single-label tasks (sEMG/ninapro/CIFAR/smnist/EEG)."""

    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        self.download_directory = self.download_data_from_s3()
        self.criterion = nn.CrossEntropyLoss().cuda()
        # (num_classes, input_channels) per supported task.
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3), 'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3), 'EEG': (4, 1)}
        (n_classes, in_channels) = dataset_hypers[self.hparams.task]
        print('task: ', self.hparams.task, 'in_channels: ', in_channels, 'classes: ', n_classes)
        # hparams.backbone is a 'depth,width' string.
        depth = list(map(int, self.hparams.backbone.split(',')))[0]
        width = list(map(int, self.hparams.backbone.split(',')))[1]
        self.backbone = Backbone_Pt(depth, n_classes, width, dropRate=self.hparams.droprate, in_channels=in_channels)
        total_params = (sum((p.numel() for p in self.backbone.parameters() if p.requires_grad)) / 1000000.0)
        print('Parameter size in MB(backbone): ', total_params)
        self.model = self.context.wrap_model(self.backbone)
        '\n        Definition of optimizer \n        '
        nesterov = (self.hparams.nesterov if self.hparams.momentum else False)
        self.opt = self.context.wrap_optimizer(torch.optim.SGD(self.model.parameters(), lr=self.hparams.learning_rate, momentum=self.hparams.momentum, weight_decay=self.hparams.weight_decay, nesterov=nesterov))
        self.lr_scheduler = self.context.wrap_lr_scheduler(lr_scheduler=torch.optim.lr_scheduler.LambdaLR(self.opt, lr_lambda=self.weight_sched, last_epoch=(self.hparams.start_epoch - 1)), step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH)

    def weight_sched(self, epoch) -> Any:
        """Step decay: multiply by 0.2 at 30%/60%/80% of training
        (or at epochs 60/120/160 when training exactly 200 epochs)."""
        if (self.hparams.epochs != 200):
            return (((0.2 ** (epoch >= int((0.3 * self.hparams.epochs)))) * (0.2 ** (epoch > int((0.6 * self.hparams.epochs))))) * (0.2 ** (epoch > int((0.8 * self.hparams.epochs)))))
        return (((0.2 ** (epoch >= 60)) * (0.2 ** (epoch >= 120))) * (0.2 ** (epoch >= 160)))

    def download_data_from_s3(self):
        'Download data from s3 to store in temp directory'
        s3_bucket = self.context.get_data_config()['bucket']
        # Per-rank directory so distributed workers do not clobber each other.
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        if self.hparams.train:
            (self.train_data, self.val_data, self.test_data) = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
            self.build_test_data_loader(download_directory)
        else:
            (self.train_data, _, self.val_data) = load_data(self.hparams.task, download_directory, False, self.hparams.permute)
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        trainset = self.train_data
        print(len(trainset))
        return DataLoader(trainset, batch_size=self.context.get_per_slot_batch_size())

    def build_validation_data_loader(self) -> DataLoader:
        valset = self.val_data
        print(len(valset))
        return DataLoader(valset, batch_size=self.context.get_per_slot_batch_size())

    def build_test_data_loader(self, download_directory):
        testset = self.test_data
        print(len(testset))
        # Kept on the instance; evaluate_full_dataset also scores the test set.
        self.test_loader = torch.utils.data.DataLoader(testset, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2)
        return
    '\n    Train and Evaluate Methods\n    '

    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
        """One optimization step; returns the loss and top-1 accuracy of the batch."""
        (x_train, y_train) = batch
        print(x_train.shape)
        print(y_train.shape)
        self.model.train()
        output = self.model(x_train)
        loss = self.criterion(output, y_train)
        top1 = utils_pt.accuracy(output, y_train, topk=(1,))[0]
        self.context.backward(loss)
        self.context.step_optimizer(self.opt)
        return {'loss': loss, 'top1_accuracy': top1.item()}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Score the validation set (loss + top-1); also the test set in train mode."""
        acc_top1 = utils_pt.AverageMeter()
        acc_top5 = utils_pt.AverageMeter()
        loss_avg = utils_pt.AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target)
                top1 = utils_pt.accuracy(logits, target, topk=(1,))[0]
                acc_top1.update(top1.item(), n)
                loss_avg.update(loss, n)
        results = {'loss': loss_avg.avg, 'top1_accuracy': acc_top1.avg}
        if self.hparams.train:
            test_acc_top1 = utils_pt.AverageMeter()
            test_acc_top5 = utils_pt.AverageMeter()
            test_loss = utils_pt.AverageMeter()
            with torch.no_grad():
                for batch in self.test_loader:
                    batch = self.context.to_device(batch)
                    (input, target) = batch
                    n = input.size(0)
                    logits = self.model(input)
                    loss = self.criterion(logits, target)
                    top1 = utils_pt.accuracy(logits, target, topk=(1,))[0]
                    test_acc_top1.update(top1.item(), n)
                    test_loss.update(loss, n)
            results2 = {'test_loss': test_loss.avg, 'test_top1_accuracy': test_acc_top1.avg}
            results.update(results2)
        return results
|
class Conv1dSamePadding(nn.Conv1d):
    """Conv1d with TensorFlow-style "same" padding.

    See: https://github.com/pytorch/pytorch/issues/3867
    Note that the padding argument in the initializer doesn't do anything now.
    """

    def forward(self, input):
        # Delegates to the module-level helper, which derives the padding from
        # the kernel size, stride and dilation at call time.
        return conv1d_same_padding(input, self.weight, self.bias, self.stride, self.dilation, self.groups)
|
def conv1d_same_padding(input, weight, bias, stride, dilation, groups):
    """Apply F.conv1d with "same" padding: at stride 1 the output length equals the input length."""
    kernel = weight.size(2)
    stride = stride[0]
    dilation = dilation[0]
    length = input.size(2)
    # Total padding required for the output length to match the input length.
    total_pad = (length - 1) * stride - length + dilation * (kernel - 1) + 1
    if total_pad % 2 != 0:
        # F.conv1d only accepts symmetric padding; absorb the odd element on the right.
        input = F.pad(input, [0, 1])
    return F.conv1d(input=input, weight=weight, bias=bias, stride=stride, padding=(total_pad // 2), dilation=dilation, groups=groups)
|
class ConvBlock(nn.Module):
    """Conv1d with "same" padding, followed by BatchNorm1d and ReLU."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int) -> None:
        super().__init__()
        conv = Conv1dSamePadding(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride)
        norm = nn.BatchNorm1d(num_features=out_channels)
        self.layers = nn.Sequential(conv, norm, nn.ReLU())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.layers(x)
|
class ResNet1D(nn.Module):
    """1D ResNet baseline from https://arxiv.org/abs/1909.04939.

    Three residual blocks with output widths
    [mid_channels, mid_channels * 2, mid_channels * 2], global average
    pooling over time, then a linear classifier with ``num_pred_classes`` outputs.
    """

    def __init__(self, in_channels: int, mid_channels: int = 64, num_pred_classes: int = 1) -> None:
        super().__init__()
        # Kept so the model can be re-instantiated from saved constructor arguments.
        self.input_args = {'in_channels': in_channels, 'num_pred_classes': num_pred_classes}
        widths = [in_channels, mid_channels, mid_channels * 2, mid_channels * 2]
        stages = [ResNetBlock(in_channels=widths[i], out_channels=widths[i + 1]) for i in range(3)]
        self.layers = nn.Sequential(*stages)
        self.final = nn.Linear(mid_channels * 2, num_pred_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        features = self.layers(x)
        # Global average pooling over the time dimension before classification.
        return self.final(features.mean(dim=-1))
|
class ResNetBlock(nn.Module):
    """Three ConvBlocks (kernel sizes 8, 5, 3) with a residual connection.

    When the channel counts differ, a 1x1 conv + BatchNorm projects the
    input before the addition; otherwise the residual is skipped entirely.
    """

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        kernel_sizes = [8, 5, 3]
        widths = [in_channels, out_channels, out_channels, out_channels]
        stages = [
            ConvBlock(in_channels=widths[i], out_channels=widths[i + 1], kernel_size=k, stride=1)
            for i, k in enumerate(kernel_sizes)
        ]
        self.layers = nn.Sequential(*stages)
        self.match_channels = in_channels != out_channels
        if self.match_channels:
            self.residual = nn.Sequential(
                Conv1dSamePadding(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
                nn.BatchNorm1d(num_features=out_channels),
            )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.match_channels:
            return self.layers(x) + self.residual(x)
        return self.layers(x)
|
def download_from_s3(s3_bucket, task, download_dir):
    """Fetch the data files for ``task`` from S3 into ``download_dir``.

    Files already present locally are skipped.  Raises NotImplementedError
    for an unsupported task name.
    """
    s3 = boto3.client('s3')
    task_files = {
        'ECG': (['challenge2017.pkl'], 'ECG'),
        'satellite': (['satellite_train.npy', 'satellite_test.npy'], 'satellite'),
        'deepsea': (['deepsea_filtered.npz'], 'deepsea'),
    }
    if task not in task_files:
        raise NotImplementedError
    data_files, s3_folder = task_files[task]
    for data_file in data_files:
        filepath = os.path.join(download_dir, data_file)
        s3_path = os.path.join(s3_folder, data_file) if s3_folder is not None else data_file
        if not os.path.exists(filepath):
            s3.download_file(s3_bucket, s3_path, filepath)
    return
|
class ECGDataset(Dataset):
    """Wraps parallel ECG arrays as (float waveform, long label) tensor pairs."""

    def __init__(self, data, label, pid=None):
        self.data = data
        self.label = label
        self.pid = pid  # optional per-sample patient id, kept for bookkeeping

    def __getitem__(self, index):
        waveform = torch.tensor(self.data[index], dtype=torch.float)
        target = torch.tensor(self.label[index], dtype=torch.long)
        return (waveform, target)

    def __len__(self):
        return len(self.data)
|
def load_data(task, path, train=False):
    """Dispatch to the task-specific loader.

    Returns a (trainset, valset, testset) tuple; raises NotImplementedError
    for an unknown task name.
    """
    if task == 'ECG':
        return load_ECG_data(path, train)
    if task == 'satellite':
        return load_satellite_data(path, train)
    if task == 'deepsea':
        return load_deepsea_data(path, train)
    raise NotImplementedError
|
def load_ECG_data(path, train):
    """Load the ECG task: with `train` set, use the reader that also carves
    out a validation split; otherwise use the plain train/test reader."""
    if train:
        return read_data_physionet_4_with_val(path)
    return read_data_physionet_4(path)
|
def load_satellite_data(path, train):
    """Load the satellite time-series task from .npy files under `path`.

    Each sample is standardized along its time axis, labels are shifted from
    1-based to 0-based, and a channel dimension is added. Returns
    (trainset, valset, testset); `valset` is None unless `train` is set, in
    which case the last `len(test)` training samples become the val split.
    """
    def _read(npy_file):
        # Files store a pickled dict {'data': ..., 'label': ...}.
        blob = np.load(npy_file, allow_pickle=True)[()]
        data, labels = blob['data'], blob['label']
        labels = labels - 1  # labels are stored 1-based
        data = (data - data.mean(axis=1, keepdims=True)) / data.std(axis=1, keepdims=True)
        data = np.expand_dims(data, 1)  # add channel dimension
        return (torch.from_numpy(data).type(torch.FloatTensor),
                torch.from_numpy(labels).type(torch.LongTensor))

    train_x, train_y = _read(os.path.join(path, 'satellite_train.npy'))
    test_x, test_y = _read(os.path.join(path, 'satellite_test.npy'))
    testset = data_utils.TensorDataset(test_x, test_y)
    if not train:
        return data_utils.TensorDataset(train_x, train_y), None, testset
    # Carve a validation split of the same size as the test set off the end.
    n_val = len(test_x)
    trainset = data_utils.TensorDataset(train_x[:-n_val], train_y[:-n_val])
    valset = data_utils.TensorDataset(train_x[-n_val:], train_y[-n_val:])
    return trainset, valset, testset
|
def read_data_physionet_4(path, window_size=1000, stride=500):
    """Load the PhysioNet/CinC 2017 ECG data and build train/test ECGDatasets.

    Each recording is standardized, labels are mapped to 0..3, the data is
    split 90/10 (fixed random_state=0), and recordings are cut into
    overlapping windows via slide_and_cut. The test set keeps per-window
    recording ids for later majority voting. Returns (trainset, None, testset).
    """
    with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin:
        res = pickle.load(fin)

    # Standardize each recording independently.
    all_data = res['data']
    for idx, rec in enumerate(all_data):
        all_data[idx] = (rec - np.mean(rec)) / np.std(rec)

    # 'N'ormal, 'A'fib, 'O'ther, '~' (noisy) -> 0..3; any other label is skipped.
    label_map = {'N': 0, 'A': 1, 'O': 2, '~': 3}
    all_label = np.array([label_map[lab] for lab in res['label'] if lab in label_map])

    (X_train, X_test, Y_train, Y_test) = train_test_split(
        all_data, all_label, test_size=0.1, random_state=0)
    print('before: ')
    print(Counter(Y_train), Counter(Y_test))

    (X_train, Y_train) = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride)
    (X_test, Y_test, pid_test) = slide_and_cut(
        X_test, Y_test, window_size=window_size, stride=stride, output_pid=True)
    print('after: ')
    print(Counter(Y_train), Counter(Y_test))

    # Shuffle the training windows.
    perm = np.random.permutation(Y_train.shape[0])
    X_train, Y_train = X_train[perm], Y_train[perm]

    # Add the channel dimension expected by the 1D CNN.
    X_train = np.expand_dims(X_train, 1)
    X_test = np.expand_dims(X_test, 1)
    return (ECGDataset(X_train, Y_train), None, ECGDataset(X_test, Y_test, pid_test))
|
def read_data_physionet_4_with_val(path, window_size=1000, stride=500):
    """Load the PhysioNet/CinC 2017 ECG data with an 80/10/10 train/val/test split.

    Same pipeline as read_data_physionet_4 (per-recording standardization,
    label mapping to 0..3, windowing via slide_and_cut) but the held-out 20%
    is further halved into val and test, both keeping per-window recording
    ids. Returns (trainset, valset, testset).
    """
    with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin:
        res = pickle.load(fin)

    # Standardize each recording independently.
    all_data = res['data']
    for idx, rec in enumerate(all_data):
        all_data[idx] = (rec - np.mean(rec)) / np.std(rec)

    # 'N'ormal, 'A'fib, 'O'ther, '~' (noisy) -> 0..3; any other label is skipped.
    label_map = {'N': 0, 'A': 1, 'O': 2, '~': 3}
    all_label = np.array([label_map[lab] for lab in res['label'] if lab in label_map])

    (X_train, X_test, Y_train, Y_test) = train_test_split(
        all_data, all_label, test_size=0.2, random_state=0)
    (X_val, X_test, Y_val, Y_test) = train_test_split(
        X_test, Y_test, test_size=0.5, random_state=0)
    print('before: ')
    print(Counter(Y_train), Counter(Y_val), Counter(Y_test))

    (X_train, Y_train) = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride)
    (X_val, Y_val, pid_val) = slide_and_cut(
        X_val, Y_val, window_size=window_size, stride=stride, output_pid=True)
    (X_test, Y_test, pid_test) = slide_and_cut(
        X_test, Y_test, window_size=window_size, stride=stride, output_pid=True)
    print('after: ')
    print(Counter(Y_train), Counter(Y_val), Counter(Y_test))

    # Shuffle the training windows.
    perm = np.random.permutation(Y_train.shape[0])
    X_train, Y_train = X_train[perm], Y_train[perm]

    # Add the channel dimension expected by the 1D CNN.
    X_train = np.expand_dims(X_train, 1)
    X_val = np.expand_dims(X_val, 1)
    X_test = np.expand_dims(X_test, 1)
    return (ECGDataset(X_train, Y_train),
            ECGDataset(X_val, Y_val, pid_val),
            ECGDataset(X_test, Y_test, pid_test))
|
def slide_and_cut(X, Y, window_size, stride, output_pid=False, datatype=4):
    """Cut each recording into overlapping fixed-length windows.

    Minority classes are oversampled by windowing with a smaller stride
    (class 1's stride depends on `datatype`; classes 2 and 3 use stride//2
    and stride//20). The final window of a recording may be dropped because
    the range excludes len(tmp_ts) - window_size itself.

    Args:
        X: array-like of recordings, shape (n_samples, signal_length).
        Y: integer class label per recording.
        window_size: length of each output window.
        stride: base step between consecutive windows.
        output_pid: when True, also return the source-recording index of
            every window (used for per-recording majority voting).
        datatype: selects the class-1 oversampling factor (4, 2, or 2.1).

    Returns:
        (windows, labels) or (windows, labels, pids) as numpy arrays.
    """
    out_X = []
    out_Y = []
    out_pid = []
    n_sample = X.shape[0]
    for i in range(n_sample):
        tmp_ts = X[i]
        tmp_Y = Y[i]
        # Fix: default the stride up front. Previously, an unrecognized
        # datatype for class 1 (or a label outside 0..3) left i_stride
        # unbound — or silently reused the previous iteration's value.
        i_stride = stride
        if tmp_Y == 1:
            if datatype == 4:
                i_stride = stride // 6
            elif datatype == 2:
                i_stride = stride // 10
            elif datatype == 2.1:
                i_stride = stride // 7
        elif tmp_Y == 2:
            i_stride = stride // 2
        elif tmp_Y == 3:
            i_stride = stride // 20
        for j in range(0, len(tmp_ts) - window_size, i_stride):
            out_X.append(tmp_ts[j:j + window_size])
            out_Y.append(tmp_Y)
            out_pid.append(i)
    if output_pid:
        return (np.array(out_X), np.array(out_Y), np.array(out_pid))
    return (np.array(out_X), np.array(out_Y))
|
def load_deepsea_data(path, train):
    """Load the DeepSEA splits from `deepsea_filtered.npz` under `path`.

    Each split's inputs are permuted from (N, L, C) to (N, C, L) for the 1D
    CNN. The `train` flag is accepted for interface parity with the other
    loaders but all three splits are always returned.
    """
    raw = np.load(os.path.join(path, 'deepsea_filtered.npz'))

    def _split(x_key, y_key):
        # Float inputs permuted to channels-first; long-typed targets.
        x = torch.from_numpy(raw[x_key]).type(torch.FloatTensor).permute(0, 2, 1)
        y = torch.from_numpy(raw[y_key]).type(torch.LongTensor)
        return data_utils.TensorDataset(x, y)

    return (_split('x_train', 'y_train'),
            _split('x_val', 'y_val'),
            _split('x_test', 'y_test'))
|
class AttrDict(dict):
    """Dictionary whose entries are also accessible as attributes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Alias the attribute namespace to the dict itself, so
        # d.key and d['key'] refer to the same storage.
        self.__dict__ = self
|
class BackboneTrial(PyTorchTrial):
    """Determined-AI trial that trains a ResNet1D backbone on one of three
    time-series tasks: 'ECG', 'satellite', or 'deepsea'.

    NOTE(review): both loss criteria are moved with .cuda(), so a GPU is
    assumed to be available.
    """

    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        # Side effect: download_data_from_s3 also populates self.train_data and
        # self.val_data (plus self.test_data when hparams.train is set).
        self.download_directory = self.download_data_from_s3()
        # task -> (num_output_classes, num_input_channels)
        dataset_hypers = {'ECG': (4, 1), 'satellite': (24, 1), 'deepsea': (36, 4)}
        if (self.hparams.task == 'deepsea'):
            # DeepSEA is multi-label: BCE on logits, and top-k accuracy is skipped.
            self.criterion = nn.BCEWithLogitsLoss().cuda()
            self.accuracy = False
        else:
            self.criterion = nn.CrossEntropyLoss().cuda()
            self.accuracy = True
        (n_classes, in_channels) = dataset_hypers[self.hparams.task]
        print('task: ', self.hparams.task, 'in_channels: ', in_channels, 'classes: ', n_classes)
        self.backbone = ResNet1D(in_channels, 64, n_classes)
        # Trainable-parameter count, reported in millions.
        total_params = (sum((p.numel() for p in self.backbone.parameters() if p.requires_grad)) / 1000000.0)
        print('Parameter size in MB(backbone): ', total_params)
        self.model = self.context.wrap_model(self.backbone)
        '\n Definition of optimizer \n '
        # Nesterov momentum is only valid when momentum is non-zero.
        nesterov = (self.hparams.nesterov if self.hparams.momentum else False)
        self.opt = self.context.wrap_optimizer(torch.optim.SGD(self.model.parameters(), lr=self.hparams.learning_rate, momentum=self.hparams.momentum, weight_decay=self.hparams.weight_decay, nesterov=nesterov))
        # Step-decay LR schedule (see weight_sched), advanced once per epoch.
        self.lr_scheduler = self.context.wrap_lr_scheduler(lr_scheduler=torch.optim.lr_scheduler.LambdaLR(self.opt, lr_lambda=self.weight_sched, last_epoch=(self.hparams.start_epoch - 1)), step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH)

    def weight_sched(self, epoch) -> Any:
        """Return the LR multiplier for `epoch`: decay by 0.2x at roughly
        30%/60%/80% of training, or at epochs 60/120/160 when epochs == 200."""
        if (self.hparams.epochs != 200):
            return (((0.2 ** (epoch >= int((0.3 * self.hparams.epochs)))) * (0.2 ** (epoch > int((0.6 * self.hparams.epochs))))) * (0.2 ** (epoch > int((0.8 * self.hparams.epochs)))))
        return (((0.2 ** (epoch >= 60)) * (0.2 ** (epoch >= 120))) * (0.2 ** (epoch >= 160)))

    def download_data_from_s3(self):
        """Download the task's files from S3 into a per-rank temp directory,
        load the datasets into memory, and return the download directory."""
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        # NOTE(review): this client is unused here; download_from_s3 creates its own.
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        if self.hparams.train:
            (self.train_data, self.val_data, self.test_data) = load_data(self.hparams.task, download_directory, True)
        else:
            # Without a dedicated val split, the test set is used for validation.
            (self.train_data, _, self.val_data) = load_data(self.hparams.task, download_directory, False)
        print(('train size: %d, val size: %d' % (len(self.train_data), len(self.val_data))))
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        """Return the shuffled training DataLoader."""
        train_data = self.train_data
        train_queue = DataLoader(train_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, pin_memory=True, num_workers=2)
        return train_queue

    def build_validation_data_loader(self) -> DataLoader:
        """Return the validation DataLoader (unshuffled, so ECG evaluation can
        align window order with self.val_data.pid)."""
        valid_data = self.val_data
        valid_queue = DataLoader(valid_data, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, pin_memory=True, num_workers=2)
        return valid_queue
    '\n Train and Evaluate Methods\n '

    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
        """Run one forward/backward/optimizer step; returns the loss (plus
        top-1 accuracy for the single-label tasks)."""
        (x_train, y_train) = batch
        self.model.train()
        output = self.model(x_train)
        if (self.hparams.task == 'deepsea'):
            # BCEWithLogitsLoss expects float targets.
            y_train = y_train.float()
        loss = self.criterion(output, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(self.opt)
        results = {'loss': loss}
        if self.accuracy:
            top1 = utils.accuracy(output, y_train, topk=(1,))[0]
            results['top1_accuracy'] = top1.item()
        return results

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Dispatch full-dataset evaluation to the task-specific routine."""
        if (self.hparams.task == 'ECG'):
            return self.evaluate_full_dataset_ECG(data_loader)
        elif (self.hparams.task == 'satellite'):
            return self.evaluate_full_dataset_satellite(data_loader)
        elif (self.hparams.task == 'deepsea'):
            return self.evaluate_full_dataset_deepsea(data_loader)
        return None

    def evaluate_full_dataset_ECG(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Evaluate ECG: window-level predictions are aggregated per recording
        by majority vote, then scored with a 4-class macro F1."""
        loss_avg = utils.AverageMeter()
        all_pred_prob = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target)
                loss_avg.update(loss, n)
                all_pred_prob.append(logits.cpu().data.numpy())
        all_pred_prob = np.concatenate(all_pred_prob)
        all_pred = np.argmax(all_pred_prob, axis=1)
        final_pred = []
        final_gt = []
        # Majority vote over all windows belonging to the same recording id.
        # NOTE(review): assumes the loader iterates self.val_data in order
        # (shuffle=False), so window order matches self.val_data.pid.
        pid_test = self.val_data.pid
        for i_pid in np.unique(pid_test):
            tmp_pred = all_pred[(pid_test == i_pid)]
            tmp_gt = self.val_data.label[(pid_test == i_pid)]
            final_pred.append(Counter(tmp_pred).most_common(1)[0][0])
            final_gt.append(Counter(tmp_gt).most_common(1)[0][0])
        tmp_report = classification_report(final_gt, final_pred, output_dict=True)
        print(confusion_matrix(final_gt, final_pred))
        # Macro-average F1 across the four rhythm classes.
        f1_score = ((((tmp_report['0']['f1-score'] + tmp_report['1']['f1-score']) + tmp_report['2']['f1-score']) + tmp_report['3']['f1-score']) / 4)
        results = {'loss': loss_avg.avg, 'score': f1_score}
        return results

    def evaluate_full_dataset_satellite(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Evaluate satellite: average loss plus top-1/top-5 accuracy."""
        acc_top1 = utils.AverageMeter()
        acc_top5 = utils.AverageMeter()
        loss_avg = utils.AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target)
                (top1, top5) = utils.accuracy(logits, target, topk=(1, 5))
                acc_top1.update(top1.item(), n)
                acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
        results = {'loss': loss_avg.avg, 'top1_accuracy': acc_top1.avg, 'top5_accuracy': acc_top5.avg}
        return results

    def evaluate_full_dataset_deepsea(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Evaluate DeepSEA (multi-label): mean average precision and mean AUC
        over all target tracks, computed from sigmoid probabilities."""
        loss_avg = utils.AverageMeter()
        test_predictions = []
        test_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target.float())
                loss_avg.update(loss, n)
                logits_sigmoid = torch.sigmoid(logits)
                test_predictions.append(logits_sigmoid.detach().cpu().numpy())
                test_gts.append(target.detach().cpu().numpy())
        test_predictions = np.concatenate(test_predictions).astype(np.float32)
        test_gts = np.concatenate(test_gts).astype(np.int32)
        stats = utils.calculate_stats(test_predictions, test_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {'test_mAUC': mAUC, 'test_mAP': mAP}
        return results
|
class BilevelDataset(Dataset):
    """Pair each 'train' example with one from a disjoint 'validation' half.

    The wrapped dataset's indices are split in half: the first half acts as
    the train split, the second half as the val split, and each observation
    yields one example from each. Call shuffle_val_inds() between epochs to
    re-pair them.
    """

    def __init__(self, dataset):
        self.dataset = dataset
        half = int(0.5 * len(dataset))
        all_inds = np.arange(len(dataset))
        self.train_inds = all_inds[:half]
        self.val_inds = all_inds[half:2 * half]
        assert len(self.train_inds) == len(self.val_inds)

    def shuffle_val_inds(self):
        # In-place shuffle so train/val examples are re-paired.
        np.random.shuffle(self.val_inds)

    def __len__(self):
        return len(self.train_inds)

    def __getitem__(self, idx):
        (x_train, y_train) = self.dataset[self.train_inds[idx]]
        (x_val, y_val) = self.dataset[self.val_inds[idx]]
        return (x_train, y_train, x_val, y_val)
|
class BilevelCosmicDataset(Dataset):
    """Bilevel wrapper for (image, mask, ignore-mask) triplets.

    The wrapped dataset's indices are split in half: the first half acts as
    the train split, the second half as the val split, and each observation
    yields one triplet from each. Call shuffle_val_inds() between epochs to
    re-pair them.
    """

    def __init__(self, dataset):
        self.dataset = dataset
        half = int(0.5 * len(dataset))
        all_inds = np.arange(len(dataset))
        self.train_inds = all_inds[:half]
        self.val_inds = all_inds[half:2 * half]
        assert len(self.train_inds) == len(self.val_inds)

    def shuffle_val_inds(self):
        # In-place shuffle so train/val triplets are re-paired.
        np.random.shuffle(self.val_inds)

    def __len__(self):
        return len(self.train_inds)

    def __getitem__(self, idx):
        (img1, mask1, ignore1) = self.dataset[self.train_inds[idx]]
        (img2, mask2, ignore2) = self.dataset[self.val_inds[idx]]
        return (img1, mask1, ignore1, img2, mask2, ignore2)
|
# NOTE: trailing non-code footer text ("Subsets and Splits ...") was
# dataset-viewer export residue, not part of the program; commented out
# so the module remains syntactically valid Python.